Diffstat (limited to 'src/arch')
-rw-r--r--  src/arch/.gitignore                    6
-rw-r--r--  src/arch/ChangeLog                  4805
-rw-r--r--  src/arch/LICENSE                      21
-rw-r--r--  src/arch/Makefile.am                  11
-rw-r--r--  src/arch/README                        7
-rw-r--r--  src/arch/arm/.gitattributes            1
-rw-r--r--  src/arch/arm/.gitignore               15
-rw-r--r--  src/arch/arm/Makefile.am              27
-rw-r--r--  src/arch/arm/arm-codegen.c           193
-rw-r--r--  src/arch/arm/arm-codegen.h          1127
-rw-r--r--  src/arch/arm/arm-dis.c               509
-rw-r--r--  src/arch/arm/arm-dis.h                41
-rw-r--r--  src/arch/arm/arm-vfp-codegen.h       247
-rw-r--r--  src/arch/arm/arm-wmmx.h              177
-rw-r--r--  src/arch/arm/cmp_macros.th            56
-rw-r--r--  src/arch/arm/dpi_macros.th           112
-rwxr-xr-x  src/arch/arm/dpiops.sh                30
-rw-r--r--  src/arch/arm/mov_macros.th           121
-rw-r--r--  src/arch/arm/tramp.c                 710
-rw-r--r--  src/arch/arm/vfp_macros.th            15
-rw-r--r--  src/arch/arm/vfpm_macros.th           14
-rwxr-xr-x  src/arch/arm/vfpops.sh                24
-rw-r--r--  src/arch/arm64/.gitignore              6
-rw-r--r--  src/arch/arm64/Makefile.am             0
-rw-r--r--  src/arch/arm64/arm64-codegen.h         3
-rw-r--r--  src/arch/ia64/.gitignore               2
-rw-r--r--  src/arch/ia64/Makefile.am              3
-rw-r--r--  src/arch/ia64/codegen.c              861
-rw-r--r--  src/arch/ia64/ia64-codegen.h        3183
-rw-r--r--  src/arch/mips/.gitignore               6
-rw-r--r--  src/arch/mips/Makefile.am              8
-rw-r--r--  src/arch/mips/mips-codegen.h         435
-rw-r--r--  src/arch/mips/test.c                 159
-rw-r--r--  src/arch/ppc/.gitignore                7
-rw-r--r--  src/arch/ppc/Makefile.am               1
-rw-r--r--  src/arch/ppc/ppc-codegen.h           953
-rw-r--r--  src/arch/s390x/.gitignore              6
-rw-r--r--  src/arch/s390x/ChangeLog              35
-rw-r--r--  src/arch/s390x/Makefile.am             7
-rw-r--r--  src/arch/s390x/s390x-codegen.h       997
-rw-r--r--  src/arch/s390x/tramp.c              1149
-rw-r--r--  src/arch/sparc/.gitignore              3
-rw-r--r--  src/arch/sparc/Makefile.am             7
-rw-r--r--  src/arch/sparc/sparc-codegen.h       955
-rw-r--r--  src/arch/sparc/test.c                123
-rw-r--r--  src/arch/sparc/tramp.c              1080
-rw-r--r--  src/arch/x64/.gitignore                4
-rw-r--r--  src/arch/x64/Makefile.am               2
-rw-r--r--  src/arch/x64/x64-codegen.h          1938
-rw-r--r--  src/arch/x86/.gitignore                6
-rw-r--r--  src/arch/x86/Makefile.am               1
-rw-r--r--  src/arch/x86/x86-codegen.h          2647
52 files changed, 22856 insertions, 0 deletions
diff --git a/src/arch/.gitignore b/src/arch/.gitignore
new file mode 100644
index 0000000..16c9840
--- /dev/null
+++ b/src/arch/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.la
+/*.lo
diff --git a/src/arch/ChangeLog b/src/arch/ChangeLog
new file mode 100644
index 0000000..c42aa63
--- /dev/null
+++ b/src/arch/ChangeLog
@@ -0,0 +1,4805 @@
+commit e8fa461503cf681fd7f6fffdbe94346cb4a0b94f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 13 13:56:18 2014 -0400
+
+ [runtime] Remove an unused interpreter file.
+
+commit b8e69265771d2d730847add35620628ff003aed1
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Sep 9 09:14:37 2014 -0400
+
+ [cleanup] Remove more old files.
+
+commit 69d89956fcc24cec955246588269cb7c8012b7cb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Sep 1 13:25:07 2014 -0400
+
+ [runtime] Remove the interpreter.
+
+commit a9db0d5b41d17cb7ff5788a63ce0eee1e01652b3
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Tue Jun 3 11:52:00 2014 -0400
+
+ Architectural level set to z10 instruction set
+
+commit edeeadda807c9189ad6b7cdd0f221c355ad95e52
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue Apr 29 16:56:12 2014 +0200
+
+ Add .gitignore file in mono/arch/arm64.
+
+commit 62b813772cfa4af873a278c39dd1f01dc6e50c2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 20:16:47 2014 +0200
+
+ [arm64] Add JIT support.
+
+commit 1d58ec09524d6f4ce37f39698e68fb45a3c0231b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 17:03:21 2014 +0200
+
+ [arm64] Add basic port infrastructure.
+
+commit 12741090edd2230bfd0fac498af3e304680380b4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 1 18:39:05 2014 +0000
+
+ [jit] Implement support for atomic intrinsics on arm.
+
+commit 21ca1bad7d0447bb5d420a58128e1c2733635efa
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Dec 11 11:13:14 2013 -0500
+
+ [arch] Add cvtsi2ss to amd64 codegen.
+
+commit 4a25d5fa1811be15c62979993cd1a37c2891d0a5
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Sat Nov 23 18:26:55 2013 +0100
+
+ Fix the encoding of x86_imul_reg_mem_imm.
+
+commit 43b05e3c36d05526f7a9f3f8767569d026e4f1c6
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Fri Nov 15 15:08:06 2013 +0100
+
+ Fix the `nop` opcode on some MIPS-based Loongson CPUs.
+
+ After much trouble building Mono in Debian/MIPS, @directhex
+ narrowed it down to this issue:
+
+ https://sourceware.org/ml/binutils/2009-11/msg00387.html
+
+ So since some of the 2E and 2F versions of the Loongson CPUs
+ break with a regular `sll zero, zero, 0` we need to issue an
+ `or at, at, 0`. This makes sure we don't randomly deadlock or
+ blow up when the CPU is under heavy load.
+
+ Yes, really.
+
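A minimal sketch of the resulting emitter, assuming Mono's mips_emit32 helper (the exact macro name here is an assumption):

    /* The canonical nop, `sll zero, zero, 0`, encodes as 0x00000000, which
       some Loongson 2E/2F parts mishandle; `or at, at, zero` (0x00200825)
       is an equally side-effect-free filler. */
    #define mips_nop(c) mips_emit32 ((c), 0x00200825)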
+commit 2f56d471f089b8f514377ce501a0c1643652d639
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 24 23:41:39 2013 +0200
+
+ Merge some Nacl/ARM changes from https://github.com/igotti-google/mono/commit/65d8d68e8c81cf6adb1076de7a9425c84cab86a3.
+
+commit ab6a96ef346220433f9f7967b763a0453d9cbc66
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue May 14 18:27:32 2013 +0200
+
+ Enable hw division/remainder on mt in non-thumb mode as well.
+
+commit 78c1e65942210449d0d1c4957b42242ebc9bdb5a
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue May 14 03:10:43 2013 +0200
+
+ Kill support for the ancient FPA format on ARM.
+
+commit a42bc8f14a3393150fb6fbb772c2b0259267f5ae
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Thu Apr 25 10:01:14 2013 -0400
+
+ Add lazy rgctx support to s390x
+
+commit 92b3dc346aad94e7e6a91e7356adcebbb180c618
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 22 17:54:27 2013 +0200
+
+ Remove obsolete 32 bit s390 backend.
+
+commit 0d9d79945bfc7e791ed39e7519b8769a3c09fe28
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Thu Jan 31 12:48:49 2013 -0800
+
+ NaCl GC improvements
+
+ - inline managed code implementation
+ (add x86 test mem imm8 codegen macro for this as well)
+ - clean up libgc NaCl code
+ - centralize mono_nacl_gc into mini.c
+
+commit a2b380c30f8e12e508d9b761b9b049d17dff3617
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 1 20:27:07 2013 +0100
+
+ Remove the unmaintained and incomplete alpha backend.
+
+commit ddee8bb5125ad07f673a5f9a45ddc629dec8c126
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Feb 26 22:08:26 2013 +0100
+
+ Remove the unmaintained and incomplete hppa backend.
+
+commit 9c434db79ba98565a8dadcfbbe8737621a698589
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 9 17:23:38 2012 -0400
+
+ Use full path for includes as this was breaking the cross compiler.
+
+commit 600580c96563f5702acee5a0307432e96731d837
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Oct 4 13:03:06 2012 +0200
+
+ Save fp registers in the ARM throw trampoline; ios has callee-saved fp registers, and LLVM generates code which uses them.
+
+commit 0b64268e0a56e3f76063f0b679975be0daaf68b1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Oct 3 10:26:37 2012 +0200
+
+ Use AM_CPPFLAGS instead of INCLUDES in Makefile.am files, as the latter is no longer supported; see http://lists.gnu.org/archive/html/automake/2012-08/msg00087.html.
+
+commit f2e43c392dde726d2f1008dfcc8515d34354e968
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 19 01:37:26 2012 +0000
+
+ Save/restore fp registers in MonoContext on ios. Fixes #1949.
+
+commit a841c76b86e38fc8e5db24f152b5fab2501ddf1a
+Author: Iain Lane <iain@orangesquash.org.uk>
+Date: Sun Apr 15 14:49:55 2012 +0100
+
+ Fix ARM printf format problems
+
+ When building with -Werror=format-security on ARM, mono fails to build
+ due to incorrect format strings in arm-dis.c
+
+commit 33426abe6bd7ad8eb37d2f214afe08a0a3d70a0b
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Mon Apr 2 13:30:43 2012 -0400
+
+ s390x-codegen.h - Define s390_SP and s390_BP
+ sgen-major-copy-object.h - Correct assertion test
+ sgen-os-posix.c - Prevent race condition between restarting and suspending a thread
+
+commit c565eab0f9d79f6009c3878eaa190529838b0204
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Mar 12 16:15:46 2012 -0400
+
+ Update some copyrights
+
+commit d711efe0d6403fa49697c304696843a789805112
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Dec 2 06:20:16 2011 +0000
+
+ Ongoing MIPS work. Fix mips_load () to be patchable, fix endianness issue in OP_MIPS_MFC1D, fix OP_JMP. 'make rcheck' runs now.
+
+commit 32a164a381080aee3afa42ea33e31d89579519a4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 16 04:35:31 2011 -0500
+
+ Revert "Add support for hardfp abi on ARM."
+
+ This reverts commit e7055b45b9211fb20021997f7da0fa24992421f5.
+
+commit aaae806b8bd16a82937c9417689aeb82bea0b952
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Nov 9 10:25:48 2011 -0500
+
+ Update two days worth of copyrights, many more missing
+
+commit 96e5ba7724999828facefb30e0982d0be6931bda
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 9 01:13:16 2011 +0100
+
+ Add support for hardfp abi on ARM.
+
+commit c6d53e16991eb2dcc3e4d99a008fdd899d2b78f2
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Fri Aug 5 17:02:45 2011 +0200
+
+ Fix up bugs in x86-codegen for NaCl.
+
+commit 8034d4b8f49485babcbffd12d3e09fd372c00ccb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 6 16:16:16 2011 +0200
+
+ Prefix ARM FPA codegen macros with 'FPA'.
+
+commit d2a95b8feb24584dd528b3deb0f5f1ec5d7766a3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 23 21:33:43 2011 +0200
+
+ Fix out-of-tree builds on arm.
+
+commit d093f6fff2bcaa4ccfc795354b151c7ca1a0c613
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Fri May 6 12:52:19 2011 -0400
+
+ Implement soft debugger for s390x and fix context macro for s390x
+
+commit 4c9723aa3efac03bc33deed252ebda71cbb1ae86
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 12:14:52 2011 +0100
+
+ Fix some warnings.
+
+commit b1a613aca13e03185d0ba49e46fd77fd8eb98fc9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 03:22:52 2011 +0100
+
+ Implement mono_memory_barrier () and OP_MEMORY_BARRIER for ARM.
+
+commit f81e3005a53a10c39f4ca8dd30a2a88719c7d005
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Sun Jan 16 23:40:23 2011 -0500
+
+ Cast result of s390x_emit16/32 to eliminate lots of warning messages
+ Check for wrapper-managed-to-native when assessing call parameters and have emit_prolog use native_size when processing those parameters
+ Signed-off-by: Neale Ferguson <neale@sinenomine.net>
+
+commit 92a55ae009739b5ec652676b8fdd615375c27fc0
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:52:46 2011 +0000
+
+ Implement mono.simd new conversion ops on amd64
+
+commit b7639e01d7603a1e34dd225edb5e99fd2181494b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:40:12 2011 +0100
+
+ Implement a few conversion operations.
+
+ Add conversion operations between 4f, 2d and 4i.
+ Implemented only on x86 for now.
+
+commit f0e5c2be6946491ba052c82794361ec0d33cb04c
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Jan 7 00:19:03 2011 +0000
+
+ AMD64 version of the new mono.simd ops
+
+commit 1aa6254fb828e043ea55d7d3e37b02812e2d9bdf
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 21:36:31 2011 +0100
+
+ Implement Shuffle for 64bits types.
+
+ * x86-codegen.h: Add macro and define to emit pshufpd.
+
+ * mini-ops.h: Add OP_SHUPD.
+
+ * cpu-x86.md:
+ * mini-x86.h: Implement x86 support.
+
+ * simd-intrinsics.c: Handle shuffle on 64bit types.
+
+ * VectorOperations.cs: Add new methods.
+
+commit c1fb94e7e72e58924dcebe8cdfcdbcbe1e65b644
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 18:43:59 2011 +0100
+
+ Add SHUFPS and macro to emit it.
+
+commit 48f5efeb334eb4b6e867c65ae53e21b3c45fd771
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 19:35:45 2011 +0100
+
+ Put back a macro definition accidentally removed by the nacl changes.
+
+commit a7074ea55af096913e4bcc8e044be7601bcc55b5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 11:49:32 2011 +0100
+
+ Fix warnings introduced by the NACL merge.
+
+commit 4edb45273377cc0858dab7e12b19026467e796c5
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 16:03:45 2010 -0800
+
+ Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client
+
+commit cfdf246cd2ffd65bd25e09f1d66bb55d57bf8953
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 14:37:36 2010 -0800
+
+ Changes to mono/arch/amd64 for Native Client
+
+commit aa974c33a3cee416fc456053164835acbf81df70
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Sep 24 11:28:46 2010 -0300
+
+ Implement amd64 support for OP_CARDTABLE.
+
+ * amd64-codegen.h (amd64_alu_reg_membase_size): Add support
+ for RIP based addressing.
+
+ * cpu-amd64.md: Add card_table_wbarrier.
+
+ * mini-amd64.c (mono_arch_output_basic_block): Emit the
+ new OP.
+
+ * mini-amd64.c (mono_arch_emit_exceptions): Handle another
+ kind of patch-info - GC_CARD_TABLE_ADDR. This is required
+ because we can neither have 64bits immediates with amd64
+ or 2 scratch regiters with current regalloc.
+
+ * mini-amd64.h: Define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER.
+
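A hedged sketch of the RIP-based addressing this adds (the helper names are hypothetical): in 64-bit mode, ModRM mod=00 with r/m=0b101 selects [RIP + disp32] rather than [disp32], so the membase macro can accept a pseudo base register and leave a patchable 4-byte displacement:

    if (basereg == AMD64_RIP) {
            amd64_emit_modrm (code, 0, reg, 5); /* mod=00, r/m=101: RIP-relative */
            amd64_emit_imm32 (code, disp);      /* patched with the card table
                                                   address at runtime */
    }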
+commit 7981b77489eba9fafe98b764ae8c423143e55a25
+Author: Mark Mason <mmason@upwardaccess.com>
+Date: Wed Aug 18 23:39:36 2010 +0800
+
+ Simplify test for MIPS imm16 operands.
+
+ Code contributed under the MIT/X11 license.
+
+commit 881a8fe8dfebf42e0f50228319132001d121c983
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Mon Aug 9 17:40:18 2010 +0200
+
+ Add hooks to the codegen macros to support NACL codegen.
+
+commit da52cebbb28392e8043a36e8c29f4ceb4f706741
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Sun Jul 25 20:09:25 2010 +0530
+
+ EOL handling
+
+ This set of .gitattributes was automatically generated from the list of files
+ that GIT tried to normalize when I enabled automatic EOL conversion.
+
+ With this set of attributes, we prevent automated EOL conversion on files that
+ we know will cause trouble down the road.
+
+commit 80806328ee52ed52783e005f044e8447d34efac5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 19 02:35:46 2010 +0000
+
+ 2010-05-19 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support.
+
+ svn path=/trunk/mono/; revision=157521
+
+commit bb66b04f8ca017660ae65afa4b86a33b32d48cdb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 8 04:41:44 2010 +0000
+
+ .gitignore
+
+ svn path=/trunk/mono/; revision=155025
+
+commit 2b562993a3dced62eb48aeedcf38f234b655e86f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 29 23:21:23 2010 +0000
+
+ 2010-03-30 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/*.sh: Remove bash dependency.
+
+ svn path=/trunk/mono/; revision=154407
+
+commit 977db7f5b92aa4e7b8909f6d2440f3347e548364
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Mar 23 20:00:46 2010 +0000
+
+ Primarily, add support for mono_arch_get_throw_corlib_exception and IMT
+ for s390x. Other s390x fixes to instruction sizes, parameter passing, and ARCH
+ settings.
+
+
+ svn path=/trunk/mono/; revision=154085
+
+commit 282ce11cd7691698334563b95ca4b49e6c32f900
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Nov 20 22:34:30 2009 +0000
+
+ removing PLATFORM_WIN32
+
+ svn path=/trunk/mono/; revision=146652
+
+commit 774d55350115d1c4f08dc2a9b015e9502d796cef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 10 00:58:49 2009 +0000
+
+ 2009-11-10 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the names of the LDMIA/STMIA macros, they don't actually
+ update the base register.
+
+ svn path=/trunk/mono/; revision=145786
+
+commit 568b4a7ab726e87c664a682193fa57c5521ed23c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 14 13:49:01 2009 +0000
+
+ 2009-08-14 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Add armv6 MOVW/MOVT.
+
+ svn path=/trunk/mono/; revision=139918
+
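A sketch of what the pair enables (the macro names are an assumption, and MOVW/MOVT formally arrive with ARMv6T2): a 32-bit constant can be materialized in two instructions instead of a literal-pool load:

    ARM_MOVW (code, reg, val & 0xffff); /* movw rd, #lo16 - zero-extends     */
    ARM_MOVT (code, reg, val >> 16);    /* movt rd, #hi16 - keeps low half   */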
+commit c4d98f3131b6b7d0732050c2e0ac7bd05b6c27c2
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Tue Aug 4 00:31:14 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * mono/arch/amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
+ * mono/mini/basic-simd.cs: added test for packed double square root.
+ * mono/mini/cpu-amd64.md: added opcode info for packed double square root.
+ * mono/mini/cpu-x86.md: added opcode info for packed double square root.
+ * mono/mini/mini-ops.h: added IR opcode for packed double square root.
+ * mono/mini/mini-x86.c: added IR to native translation code for packed double square root.
+ * mono/mini/mini-amd64.c: removed todo for packed double square root.
+ * mono/mini/simd-intrinsics.c: added method to IR opcode conversion for
+ packed double square root.
+
+ svn path=/trunk/mono/; revision=139309
+
+commit fc5d2d293fe800d860e9af4fcd9b19f9be7d4e17
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 24 15:00:25 2009 +0000
+
+ Fri Jul 24 16:54:13 CEST 2009 Steven Munroe <munroesj@us.ibm.com>
+
+ This patch is contributed under the terms of the MIT/X11 license
+
+ * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
+ conversion to support combining addis for bits 32-47 with
+ signed load/store displacements for bits 48-63.
+ (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
+ These instructions are available to 32-bit programs on 64-bit
+ hardware, and on 32-bit hardware starting with PowerISA V2.01.
+ [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
+ native mode.
+ [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
+ ppc32.
+
+
+ svn path=/trunk/mono/; revision=138635
+
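A minimal sketch of the "high adjusted" conversion (the committed macro may differ in masking): the low 16 bits are consumed as a signed displacement, so the high half must absorb the sign bias:

    /* addis rD, rA, ppc_ha(off) followed by ld rD, (off & 0xffff)(rD)
       reaches `off` even when bit 15 of the low half is set. */
    #define ppc_ha(val) (((val) >> 16) + (((val) & 0x8000) ? 1 : 0))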
+commit f44bc9e40cc840bf63bf782aa0338aae3e898f7f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 20 20:45:49 2009 +0000
+
+ 2009-07-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
+ of this instruction.
+
+ svn path=/trunk/mono/; revision=138242
+
+commit 88ccf5c589b23d6e79ea5a588d3986693b09879a
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 13 21:58:58 2009 +0000
+
+ 2009-07-13 Zoltan Varga <vargaz@gmail.com>
+
+ * x86/x86-codegen.h: Applied patch from Marian Salaj <salo3@atlas.cz>.
+ Fix encoding of PMINSW and PMINSD. Fixes #521662.
+
+ svn path=/trunk/mono/; revision=137821
+
+commit 64d366eddf3b1c93bcaaff2190fa1cc2b01f7f03
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jul 10 22:35:07 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+ * amd64/amd64-codegen.h: Fix bugs in simd macros.
+
+ svn path=/trunk/mono/; revision=137736
+
+commit d7fa5cedae9e4859b340ee29e997dfd48b45ce6e
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:25:11 2009 +0000
+
+ Fix wrong date in my entry to ChangeLog files. Sorry! :((
+
+ svn path=/trunk/mono/; revision=136786
+
+commit 1c634ebda21ddf5392c9d8edd030323d1ad85962
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:19:29 2009 +0000
+
+ mini-amd64.c: Added code to convert simd IR to native amd64 sse.
+ amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+
+ svn path=/trunk/mono/; revision=136785
+
+commit bb994071dcc42ba150d88776fe70f8d35fc522a9
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jun 23 23:55:26 2009 +0000
+
+ Fix LCONV_TO_xx and ICONV_TO_xx. Fix leave_method dump of returned
+ structure. Fix formatting.
+ Correct instruction lengths.
+ Add new instructions.
+
+ svn path=/trunk/mono/; revision=136748
+
+commit f48a4f5a13745caf5350d6f190efb97ec6b605ef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:25:02 2009 +0000
+
+ Fix a few uses of ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136607
+
+commit 4ecc9d712b82d78c853e574edc0345c85bfcd660
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:24:56 2009 +0000
+
+ Fix a few uses of ppc_load_reg/ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136606
+
+commit 40c668ecb1553ffb7b6575b439b3ff8420265cd8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:22:10 2009 +0000
+
+ 2009-06-22 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Rework the naming of the load/store macros,
+ ldr/str now handle register sized quantities, while ldptr/stptr handle
+ pointer sized quantities.
+
+ svn path=/trunk/mono/; revision=136604
+
+commit cf0e113f7dd91ff8b46e35047cc48c2e5ece925c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 18:47:03 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside
+ macros.
+
+ svn path=/trunk/mono/; revision=136548
+
+commit 3858973d0bd980206ea3725a2e74f2a336aa1aa1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 13:04:42 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities.
+ Handle little endian host platforms in ppc_emit32.
+
+ svn path=/trunk/mono/; revision=136539
+
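A sketch of the endianness handling (assumed from the description): PPC instructions are stored as big-endian words, so a little-endian host must byte-swap as it emits:

    #define ppc_emit32(c, x) do {                                  \
            *(guint32 *)(c) = GUINT32_TO_BE ((guint32)(x));        \
            (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));    \
    } while (0)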
+commit 9629536810d07a63b980a29912eaf3df7313fee9
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jun 12 17:33:11 2009 +0000
+
+ Add macros for coding two-byte SIMD/SSE opcodes. Added comments to help tell the different types of SSE codegen macros apart.
+
+ svn path=/trunk/mono/; revision=136018
+
+commit 76cddabf0319c7be9fae2b6c532aafe6587fafbc
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Apr 22 23:59:10 2009 +0000
+
+ merge
+
+ svn path=/trunk/mono/; revision=132427
+
+commit 965b554666f2999b9e01dd731b1134af1cfcd5fa
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 6 15:09:57 2009 +0000
+
+ 2009-04-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD.
+
+ svn path=/trunk/mono/; revision=131125
+
+commit 7b7235494cabe7c5a796fafd6297070f993b03a8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 22:37:35 2009 +0000
+
+ 2009-04-03 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Add macros for decoding the SIB byte.
+
+ svn path=/trunk/mono/; revision=130910
+
+commit 9f497af70ef5ed9244ffbe9a6263f7d077136148
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 00:50:47 2009 +0000
+
+ 2009-04-02 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add missing VFP codegen macros.
+
+ svn path=/trunk/mono/; revision=130817
+
+commit 7c682141c5861685e5b0efdcc1f337083657cf9d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 6 15:55:12 2009 +0000
+
+ 2009-03-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing
+ a file in the middle of a function.
+
+ svn path=/trunk/mono/; revision=128782
+
+commit a7f6dd7620d7c440216c0f156bcd969a28a592d4
+Author: Martin Baulig <martin@novell.com>
+Date: Sat Feb 28 14:36:50 2009 +0000
+
+ Create .gitignore's.
+
+ svn path=/trunk/mono/; revision=128265
+
+commit 22e6e9728faa11a87a7f6f0f0ff0f0f8ef754c03
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 27 06:21:52 2009 +0000
+
+ 2009-02-27 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are
+ autogenerated.
+
+ svn path=/trunk/mono/; revision=128179
+
+commit c70f15fc12afeb73f19d4ff18cf11b7289d76c4f
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Feb 2 23:32:58 2009 +0000
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * ppc/ppc-codegen.h: Make operand order and case consistent
+ (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs,
+ ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz,
+ ppc_stw,ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu,
+ ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw,
+ ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use
+ "i" or "ui" instead of "d" for immediated operands to immediate
+ arthimetic and logical instructions in macros ppc_addi, ppc_addis,
+ ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd.
+ [__mono_ppc64__]: Make operand order and case consistent
+ (assembler order) for ppc_load_multiple_regs,
+ ppc_store_multiple_regs. Simplify the DS form and make them
+ consistent with D forms for ppc_load_reg, ppc_load_reg_update,
+ ppc_store_reg, ppc_store_reg_update. ppc_ld, ppc_lwa, ppc_ldu,
+ ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux.
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * exceptions-ppc.c (restore_regs_from_context): Correct operand
+ order (offset then base reg) for ppc_load_multiple_regs.
+ (emit_save_saved_regs) Correct operand order for
+ ppc_store_multiple_regs.
+ (mono_arch_get_call_filter): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * mini-ppc.c (emit_memcpy): Fix operand order for
+ ppc_load_reg_update and ppc_store_reg_update.
+ (mono_arch_output_basic_block): Correct operand order for ppc_lha.
+ (mono_arch_emit_epilog): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * tramp-ppc.c (mono_arch_create_trampoline_code): Correct operand
+ order for ppc_store_multiple_regs and ppc_load_multiple_regs.
+
+ svn path=/trunk/mono/; revision=125443
+
+commit f228d47d2afc549321cec800466e6bc1cde631bb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 19 19:47:54 2009 +0000
+
+ 2009-01-19 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add x86_movd_xreg_membase.
+
+ svn path=/trunk/mono/; revision=123825
+
+commit 792160756d6ef76711408f151838c3f5a5f8d83b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 19 19:46:04 2008 +0000
+
+ 2008-12-19 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Fixed the argument order for lwzu in
+ ppc_load_reg_update.
+
+ svn path=/trunk/mono/; revision=121883
+
+commit 344a06253c9c1bad287e160b9714b0a052e68a09
+Author: Mark Mason <glowingpurple@gmail.com>
+Date: Sat Dec 13 06:54:25 2008 +0000
+
+ 2008-12-12 Mark Mason <mmason@upwardaccess.com>
+
+ * mips/mips-codegen.h: Changes to support n32.
+
+ Contributed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=121488
+
+commit 2dcc1868b2e2e830a9fa84a445ee79a8f6ab38ba
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Wed Dec 10 09:33:57 2008 +0000
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Merged with mini-ppc64.c.
+
+ * mini-ppc.h: Define PPC_MINIMAL_PARAM_AREA_SIZE on all targets.
+
+ * Makefile.am: Use the same sources for PPC and PPC64.
+
+ * mini-ppc64.c: Removed.
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64
+ merge.
+
+ svn path=/trunk/mono/; revision=121203
+
+commit 77eff8936b5e423be2712ba66cd8baba0edd2795
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 20:57:02 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Some simple merges from mini-ppc64.c.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence.
+ Added ppc_compare_log.
+
+ svn path=/trunk/mono/; revision=120890
+
+commit dd397c9fd311f0411694ff1cc7904aec14f4551b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 16:42:24 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc.c, mini-ppc.c, mini-ppc.h: Merged tramp-ppc.c with
+ tramp-ppc64.c.
+
+ * Makefile.am: Use tramp-ppc.c instead of tramp-ppc64.c.
+
+ * tramp-ppc64.c: Removed.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added
+ ppc_load/store_multiple_regs and ppc_compare_reg_imm.
+
+ svn path=/trunk/mono/; revision=120852
+
+commit 7f226f68fb98684dafd132d90ca1a24635c33557
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Dec 2 16:03:45 2008 +0000
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc64.c (mono_arch_create_rgctx_lazy_fetch_trampoline):
+ Fix trampoline size.
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: A few floating point
+ conversion opcodes are implemented natively instead via emulation.
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Opcodes for floating point conversions from
+ 64 bit integers.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=120492
+
+commit 742361c7bfc21faf8485d20d00cdfc58c04800f9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 28 19:06:34 2008 +0000
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h: Enable generalized IMT thunks and
+ make them work.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * object.c: Don't put function descriptors into generalized IMT
+ thunks.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: #define for the maximum length of a load
+ sequence.
+
+ svn path=/trunk/mono/; revision=120248
+
+commit b45b096d6d4246f16d05e42838122f1d58f875f6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 21 00:21:53 2008 +0000
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: Several fixes. Now
+ PPC64 passes basic-long.exe.
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit
+ values.
+
+ svn path=/trunk/mono/; revision=119560
+
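A sketch of the 64-bit-safe form (assumed): the comparison has to happen at gint64 width, or large constants truncate into range:

    #define ppc_is_imm16(val)  ((gint64)(val) >= -32768 && (gint64)(val) <= 32767)
    #define ppc_is_uimm16(val) ((gint64)(val) >= 0 && (gint64)(val) <= 65535)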
+commit dc227de13e4f1cee33c379401adbb90a225e680a
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:45:00 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD.
+
+ svn path=/trunk/mono/; revision=119549
+
+commit 01e12b57e8773f9c65c64a91f956b0fa9335d095
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:44:44 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants.
+
+ * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm.
+
+ * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently.
+
+ svn path=/trunk/mono/; revision=119545
+
+commit 96ed3f7c4ea51c61ec3b5d0600c32fa003b8e4f7
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:36:13 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * decompose.c: Decompose carry and overflow add on PPC64 like on
+ other 64 bit archs. Don't decompose sub at all on PPC64.
+
+ * mini-ppc64.c, exceptions-ppc64.c, tramp-ppc64.c, cpu-ppc64.md:
+ Several fixes and new opcodes. Now PPC64 runs (but doesn't pass)
+ basic-long.exe.
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in
+ ppc_load_func to fix the 2 bit shift.
+
+ svn path=/trunk/mono/; revision=119516
+
+commit 14651d4fa6b039131000aa5157ed99b7526f89b8
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:27:36 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: 64 bit division opcodes.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119515
+
+commit daa4af175e0f8b95888918dbf429c7d5f66d3c07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 20 14:28:51 2008 +0000
+
+ 2008-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only
+ used by the interpreter.
+
+ svn path=/trunk/mono/; revision=119444
+
+commit 3225dc9308230de9fbbca884c05e6b150a8e0333
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 14:12:04 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PEXTR B/W/D.
+
+ svn path=/trunk/mono/; revision=119441
+
+commit 5c317c4676f911a0620b54e6668cf66a5c0dda31
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:58 2008 +0000
+
+ 2008-11-18 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PINSR B/W/D.
+
+ svn path=/trunk/mono/; revision=119229
+
+commit b31b375fc1354cc835d183e7e251e602eeb038c5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:49 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ.
+
+ * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg
+ macro.
+
+ svn path=/trunk/mono/; revision=119227
+
+commit dbebfad82832bf895561902dd527d2e4c158c2c9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 15:32:41 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Macro for nop added.
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, tramp-ppc64.c, cpu-ppc64.md: Changes
+ for PPC64. An empty program runs now.
+
+ svn path=/trunk/mono/; revision=119162
+
+commit 406790f1df77c80b5b28bcac561e7b6c6cd1a3a6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:25:11 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: PPC64 code generation macros.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119141
+
+commit 484dbedc8136e413a77ee11938d40e713cfefcfd
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:17:36 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few fixes and additions.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119140
+
+commit 74b70bd5f7bc3b40a919c6c8b06c22facae8df6b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 17 17:00:22 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant
+ and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros.
+
+ svn path=/trunk/mono/; revision=119057
+
+commit 59483983e37bb55af19f4e98e3de2f1ad216989b
+Author: Andreas Färber <afaerber@mono-cvs.ximian.com>
+Date: Sat Nov 15 10:59:47 2008 +0000
+
+ 2008-11-15 Andreas Faerber <andreas.faerber@web.de>
+
+ * ppc/test.c: Add support for Mac OS X.
+
+ This commit is licensed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=118924
+
+commit 6c930cb35aa08e10abba989d9cb8560b4636ba73
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 13 22:51:27 2008 +0000
+
+ 2008-11-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Remove the unused macro x86_pshufd_reg_reg.
+
+ svn path=/trunk/mono/; revision=118779
+
+commit bfe79f71f1352fbbfb696de3b0c093562b6fefb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 4 20:17:31 2008 +0000
+
+ 2008-11-04 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add store nta.
+
+ svn path=/trunk/mono/; revision=117921
+
+commit 42f47d048391da1619aa26b70e54980c4c33e3f2
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 3 14:41:44 2008 +0000
+
+ 2008-11-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add prefetch instruction
+ and x86_sse_alu_reg_membase macro.
+
+ svn path=/trunk/mono/; revision=117753
+
+commit eaf2804839ffb61912a8eeef7c3a58463aafcdd6
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 19:24:34 2008 +0000
+
+ 2008-10-28 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add long version of the packed integer
+ ops.
+
+ svn path=/trunk/mono/; revision=117292
+
+commit 3fffcb4ac5879f2655ee3b4b3bee093a9eaa5016
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 00:05:56 2008 +0000
+
+ 2008-10-27 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movddup.
+
+ svn path=/trunk/mono/; revision=117220
+
+commit bf9bec59fad96b9a7cb38921c26bb1c176fe40ce
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 21:58:17 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed pack with saturation.
+
+ svn path=/trunk/mono/; revision=116995
+
+commit 2ffed07a8205616ea4a1605338f08c8ad6c77432
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 13:36:53 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed mul high.
+
+ svn path=/trunk/mono/; revision=116936
+
+commit 2b6070d8bbd583f6bb90e02f3961252ef0854da8
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Oct 24 01:02:49 2008 +0000
+
+ remove temporary/generated files
+
+ svn path=/trunk/mono/; revision=116902
+
+commit 7a2889c2ce0cfbc193324b64764a02e42f5daee8
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 00:35:54 2008 +0000
+
+ 2008-10-23 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation
+ and compare greater.
+
+ svn path=/trunk/mono/; revision=116896
+
+commit 600a42f70b41a94712aac746e44f2bba885dfc1f
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 20 19:36:04 2008 +0000
+
+ 2008-10-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add multiply and store high.
+
+ svn path=/trunk/mono/; revision=116545
+
+commit 454b5617264c1bb64ff7296669db98a14cc58118
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 17 17:41:14 2008 +0000
+
+ 2008-10-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int shuffle.
+
+ svn path=/trunk/mono/; revision=116265
+
+commit 8336fe34234402529da0e46af634948d678ee649
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 16 23:22:27 2008 +0000
+
+ 2008-10-16 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int compare equals and
+ psabw.
+
+ svn path=/trunk/mono/; revision=116117
+
+commit 0a6e6df8d766d7ad1b21d6c234826293d1317979
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Oct 15 20:52:54 2008 +0000
+
+ 2008-10-15 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask.
+
+ svn path=/trunk/mono/; revision=115919
+
+commit ec2240eaee83b7c5ff444e0708a114458394d55b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 14 15:02:05 2008 +0000
+
+ 2008-10-14 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movsldup and movshdup.
+
+ svn path=/trunk/mono/; revision=115785
+
+commit 7ed9633867d31f5dd5fd971611f952574c005a87
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 22:13:15 2008 +0000
+
+ 2008-10-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add remaining FP sse1 ops.
+ Add sse ps encoding with imm operand.
+ Add remaining sse1 ops.
+
+ svn path=/trunk/mono/; revision=115699
+
+commit 18f1e82ca6ebaf0929f654a56ab9ddfadfacacb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 01:13:10 2008 +0000
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macro for sse41 ops.
+ Add defined for pack ops, dword shifts/mul/pack.
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * basic-simd.cs: Remove PackWithUnsignedSaturation tests, as it turns out
+ that packuswb/packusdw don't work with unsigned numbers that would be
+ negative in signed format.
+
+ * cpu-x86.md: Add doubleword forms of many ops and packing ones.
+ Fix the len of fconv_to_r8_x and xconv_r8_to_i4.
+
+ * mini-ops.h: Add doubleword forms of many ops and packing ones.
+
+ * mini-x86.c: Emit doubleword forms of many ops and packing ones.
+
+ * simd-intrinsics.c (SimdIntrinsc): Rename the flags field to simd_version.
+
+ * simd-intrinsics.c (vector4f_intrinsics): Use simd_version field for sse3 ops.
+
+ * simd-intrinsics.c (vector4u_intrinsics): Rename to vector4ui_intrinsics and
+ add more ops.
+
+ * simd-intrinsics.c (simd_version_name): New function, returns the name of the
+ version as the enum in mini.h.
+
+ * simd-intrinsics.c (emit_intrinsics): Instead of having a special emit mode
+ for sse3 ops, check the simd_version field if present. This way the code
+ works with all versions of sse.
+
+ svn path=/trunk/mono/; revision=115610
+
+commit 494ea4f86907f393c8f0ba660edb100a107a8c80
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Oct 11 05:26:06 2008 +0000
+
+ 2008-10-11 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support.
+
+ svn path=/trunk/mono/; revision=115509
+
+commit ba0739c0dc1dd6713f6127160dcee501b105c300
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 10 21:55:37 2008 +0000
+
+ 2008-10-10 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets.
+
+ svn path=/trunk/mono/; revision=115494
+
+commit 5de452f7ff84e26bd22b86205a1cdb9fc207fe75
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 9 18:28:16 2008 +0000
+
+ 2008-10-09 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros for sse shift, pack, unpack,
+ saturated math and packed byte/word/dword math.
+
+ svn path=/trunk/mono/; revision=115367
+
+commit 922c5a03dc6cd66147b1c6bfeb8c1045176618da
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 3 14:28:09 2008 +0000
+
+ 2008-10-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros and enum for SSE instructions.
+
+ svn path=/trunk/mono/; revision=114751
+
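A hedged sketch of the shape these macros share (the exact signature is an assumption): a 0x0F-escaped opcode taken from the enum, then a ModRM byte for the register pair:

    #define x86_sse_alu_reg_reg(inst, opc, dreg, reg)       \
            do {                                            \
                    *(inst)++ = (unsigned char)0x0f;        \
                    *(inst)++ = (unsigned char)(opc);       \
                    x86_reg_emit ((inst), (dreg), (reg));   \
            } while (0)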
+commit f2d756dab8d08c009df41d94eb21fdf427a8e01a
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sat Sep 27 13:02:48 2008 +0000
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings.
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Compiler warning fixes.
+
+ svn path=/trunk/mono/; revision=114279
+
+commit 386d8b482a7e399e4e8d130dd0d2d2ab405068ae
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sun Sep 7 10:25:11 2008 +0000
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * marshal.c (mono_type_native_stack_size): Treat
+ MONO_TYPE_TYPEDBYREF like MONO_TYPE_VALUETYPE.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * method-to-ir.c (mono_method_to_ir2): Disable tail calls for PPC
+ until they're implemented properly.
+
+ * exceptions-ppc.c: Use arch-independent exception-handling code
+ instead of custom one.
+
+ * exceptions-ppc.c, mini-ppc.c, mini-ppc.h: Bug fixes and changes
+ for Linear IR.
+
+ * tramp-ppc.c, mini-ppc.c: Fixed warnings.
+
+ * decompose.c, aot-runtime.c, aot-compiler.c: PPC code also
+ applies when __powerpc__ is defined.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * libtest.c: Darwin structure alignment also applies to PPC.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some
+ warnings.
+
+ svn path=/trunk/mono/; revision=112455
+
+commit 5c8178c1e6cf4d2370c865c6bc66995ca1174eb9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Jun 16 09:37:01 2008 +0000
+
+ 2008-06-16 Mark Probst <mark.probst@gmail.com>
+
+ * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro
+ nobody uses.
+
+ svn path=/trunk/mono/; revision=105886
+
+commit ecbcbb317678440e62a13e16820f95f6ea2dff3d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jun 6 02:08:56 2008 +0000
+
+ 2008-06-06 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the
+ instructions recommended by the amd64 manual.
+
+ svn path=/trunk/mono/; revision=105134
+
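For reference, a sketch built from the multi-byte NOP encodings the AMD64 optimization manual recommends (whether the macro emits exactly these is an assumption):

    static const unsigned char nops [4][4] = {
            { 0x90 },                   /* nop               */
            { 0x66, 0x90 },             /* o16 nop           */
            { 0x0f, 0x1f, 0x00 },       /* nop dword [rax]   */
            { 0x0f, 0x1f, 0x40, 0x00 }, /* nop dword [rax+0] */
    };
    memcpy (code, nops [size - 1], size); /* one instruction per pad, size 1-4 */
    code += size;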
+commit 0ded1416da01e39a6c4a33fc9798123d4021fe4d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 14:18:56 2008 +0000
+
+ 2008-04-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of
+ win64.
+
+ svn path=/trunk/mono/; revision=101210
+
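A sketch of the fix (close to, though not necessarily identical to, the committed macro): on Win64 `long` is 32 bits wide, so a glong cast made the range check vacuous; gint64 keeps it meaningful:

    #define amd64_is_imm32(val) \
            ((gint64)(val) >= -((gint64)1 << 31) && \
             (gint64)(val) <= ((gint64)1 << 31) - 1)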
+commit cb1954322f73b8d1b0a6836c5242b05538ed72dd
+Author: Jb Evain <jbevain@gmail.com>
+Date: Sun Apr 13 11:44:22 2008 +0000
+
+ last merge 100420:100549
+
+ svn path=/branches/jb/ml2/mono/; revision=100550
+
+commit a977d5e7585e338491944fc87b5e018891eedd93
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Wed Mar 12 17:08:32 2008 +0000
+
+ In .:
+ 2008-03-13 Geoff Norton <gnorton@novell.com>
+
+ * arch/arm/tramp.c: Don't compile this on PLATFORM_MACOSX
+
+
+ svn path=/trunk/mono/; revision=98063
+
+commit 8c6ca9f3fda169feccab289ecd181e06bcc8e133
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 18 18:25:24 2008 +0000
+
+ 2008-02-18 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro.
+
+ svn path=/trunk/mono/; revision=96092
+
+commit 7a7cef000b9d59672b47c0fcdf75bd1fc00b8c78
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 14 14:21:56 2008 +0000
+
+ 2008-02-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro.
+
+ svn path=/trunk/mono/; revision=95633
+
+commit 9cbc23b5ee9e4f2dca88f8418d11be97079c25a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 8 14:28:06 2008 +0000
+
+ 2008-02-08 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes
+ so they are consistent.
+
+ svn path=/trunk/mono/; revision=95254
+
+commit b951542a9ead8a408c6560a0ffad28a5ade9670d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:12:46 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true.
+
+ svn path=/trunk/mono/; revision=93834
+
+commit 95aa5dc93dbfbcf10125032ecde0e5eabc969a98
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:10:14 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Only set this on arm.
+
+ svn path=/trunk/mono/; revision=93833
+
+commit 11c84542edf07ed41b831c12058f9a0bdd83df93
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 20 17:45:36 2007 +0000
+
+ 2007-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller
+ instruction encoding.
+
+ svn path=/trunk/mono/; revision=90005
+
+commit b15fabef0c7798e4850432910d97e0249cd691fc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Nov 10 15:22:00 2007 +0000
+
+ 2007-11-03 David S. Miller <davem@davemloft.net>
+
+ * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi
+ can be used if the constant value only has the top 22 bits set.
+
+ svn path=/trunk/mono/; revision=89409
+
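A sketch of the special case (helper names are assumptions): sethi deposits a 22-bit immediate into bits 31..10 and clears bits 9..0, so a constant whose low 10 bits are zero needs no trailing or:

    if ((val & 0x3ff) == 0) {
            sparc_sethi (code, val >> 10, reg);         /* sethi %hi(val), reg  */
    } else {
            sparc_sethi (code, val >> 10, reg);
            sparc_or_imm (code, reg, val & 0x3ff, reg); /* or reg, %lo(val), reg */
    }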
+commit e22c1134d1553f6da21c1ef50ab4afb009d7c215
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Mon Nov 5 22:28:08 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+
+ svn path=/trunk/mono/; revision=88931
+
+commit ad3b3601f5c113df825c3d2e09fb03b5aa4d1208
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Thu Nov 1 19:03:16 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+ svn path=/trunk/mono/; revision=88673
+
+commit 8991f4a9503167171a0ad5e745d71ec4bd8b846c
+Author: Jonathan Chambers <joncham@gmail.com>
+Date: Fri Oct 26 14:41:54 2007 +0000
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * mini-amd64.c: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ * mini-amd64.h: Add %rdi and %rsi to MonoLMF structure
+ on Win64. Fix intrinsic, use _AddressOfReturnAddress
+ instead of the non-existent _GetAddressOfReturnAddress.
+
+ * tramp-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Save/restore %rdi and %rsi in MonoLMF.
+
+ * exceptions-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Modify (throw_exception) signature to take
+ %rdi and %rsi on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+
+ svn path=/trunk/mono/; revision=88258
+
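The underlying portability point, as a two-line sketch: Win64 is LLP64, so `long` stays 32-bit while pointers are 64-bit, and pointer-sized values must go through gsize/gssize:

    gsize delta = (gsize)p2 - (gsize)p1; /* correct on Win64 */
    /* long delta = (long)p2 - (long)p1;    truncates: sizeof(long) == 4 there */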
+commit 118f4540a2da9cdb72debfb786a9930e93f2a10b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Oct 9 00:12:58 2007 +0000
+
+ 2007-10-09 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unnecessary
+ rex prefix which trips up valgrind.
+
+ svn path=/trunk/mono/; revision=87140
+
+commit e43f3ebed2b5b54c47b5f8ce458788dce0ef97dc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 14 14:04:54 2007 +0000
+
+ 2007-07-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Remove some unused rex prefixes.
+
+ svn path=/trunk/mono/; revision=81979
+
+commit 25f0e1d2bd61097c008fa88e4a114884bb6fe0c9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 4 13:17:45 2007 +0000
+
+ Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added minimal sse instructions currently
+ needed by the JIT.
+
+
+ svn path=/trunk/mono/; revision=81331
+
+commit e971b6ec5cf03043dc227759fced05d5786964d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 13 17:41:53 2007 +0000
+
+ 2007-06-13 Randolph Chung <tausq@debian.org>
+
+ * hppa/hppa-codegen.h: Update with more instructions.
+ * hppa/tramp.c: Disable for linux since we don't support the
+ interpreter.
+
+
+ svn path=/trunk/mono/; revision=79463
+
+commit 26169bb71cd30b373975373952fb11d7a26b0cca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 20 19:41:51 2007 +0000
+
+ 2007-05-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed.
+
+ svn path=/trunk/mono/; revision=77730
+
+commit a024b2405701bbee2003e46a0f9b0e2c0486033c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 23 11:31:33 2007 +0000
+
+ 2007-04-23 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha port work from
+ Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=76103
+
+commit 5ca5ea86f1ff85953c28e0ba3b657268cd2cdfba
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Sun Apr 15 09:11:00 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+ * mini-s390.c: Correct checking for enum type in return value processing.
+
+ svn path=/trunk/mono/; revision=75718
+
+commit 9159abc7ec906d64a15eee8e02b9e5b3f2cce87d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Thu Apr 12 20:45:34 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+
+ svn path=/trunk/mono/; revision=75663
+
+commit b7fd657ee94257eeec946fa9eb11b3f60e7e33e6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 12 16:07:56 2007 +0000
+
+ Mon Mar 12 17:07:32 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * amd64/amd64-codegen.h: removed some useless size rex prefixes.
+
+
+ svn path=/trunk/mono/; revision=74128
+
+commit 0ba3e4bdd057c7a0d25767f7647a00f07683b44c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 24 20:01:27 2007 +0000
+
+ Wed Jan 24 21:00:40 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: fixed encoding of short/byte load/store
+ instructions with negative immediate offsets.
+
+
+ svn path=/trunk/mono/; revision=71622
+
+commit 0251f000fba5c8f99bec6c33beae0c2aabe66451
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 23 17:11:29 2007 +0000
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+ svn path=/trunk/mono/; revision=71523
+
+commit 8e25ae408b9d1836130807d3f465023347051332
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Dec 22 22:51:15 2006 +0000
+
+ Patch from Sergey Tikhonov <tsv@solvo.ru>
+
+ Mono on Alpha updates:
+
+ - Code cleanup
+ - Some hacks to support debugger
+ - updates for "linears" optimization
+
+
+ svn path=/trunk/mono/; revision=69976
+
+commit edd2746e20c982e094abfd547afad74d8e7d2302
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 20 16:37:26 2006 +0000
+
+ Mon Nov 20 17:36:45 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: added support for thumb interworking instructions.
+
+
+ svn path=/trunk/mono/; revision=68201
+
+commit b63503e7c4b5ebb8baafb5b58ec69395146db022
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 15 16:00:09 2006 +0000
+
+ Wed Nov 15 16:56:53 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * mips/*: fixes by Mark E Mason <mark.e.mason@broadcom.com>.
+
+
+ svn path=/trunk/mono/; revision=67929
+
+commit 6f8d67005785ba86e81ac930325767d0b270a070
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 10 18:42:10 2006 +0000
+
+ Typo fixes.
+
+ svn path=/trunk/mono/; revision=67683
+
+commit f99322f3ea7b7be85ac63c87c664aafb7f5e17bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Oct 11 21:34:24 2006 +0000
+
+ 2006-10-11 Sergey Tikhonov <tsv@solvo.ru>
+
+ * atomic.h: Fix atomic decrement.
+
+ * mini/cpu-alpha.md: Use native long shift insts
+
+ * mono/mono/mini/tramp-alpha.c: Implemented
+ mono_arch_patch_delegate_trampoline method
+
+ * Started work on using global registers
+
+ * Use byte/word memory load/store insts if cpu supports it
+
+ * Code clean up
+
+
+
+
+ svn path=/trunk/mono/; revision=66573
+
+commit 538fd0794b9ef24f7c765891ed682fc947cf8e85
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 13:02:59 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=65305
+
+commit 0689ca5f72fa8cb03fb1b565a31c4e2b22774a64
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 12 11:10:42 2006 +0000
+
+ Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: VFP floating point format code generation support.
+
+
+ svn path=/trunk/mono/; revision=65295
+
+commit deacad246a936216f09a81b9881c6780de8dd406
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 10:05:29 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops.
+
+ svn path=/trunk/mono/; revision=65289
+
+commit 207e90216277d1d1ee0e6cd37f183440c8c39a26
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:10:43 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg.
+
+ svn path=/trunk/mono/; revision=62746
+
+commit 8f58fa13418008cb86a8ba450a894b23efc4574e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:09:09 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from
+ Sergey Tikhonov <tsv@solvo.ru>. Updates to alpha support.
+
+ svn path=/trunk/mono/; revision=62745
+
+commit ef8021400f045f835fcf70baf5ba5880fe6eca93
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 15 15:00:59 2006 +0000
+
+ Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: reduce noisy build warnings by
+ casting to the more commonly used unsigned char type
+ (from johannes@sipsolutions.net (Johannes Berg)).
+
+
+ svn path=/trunk/mono/; revision=61757
+
+commit de54a3e44b1214298b39386b49e1ca992176e2e4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 14 18:51:25 2006 +0000
+
+ 2006-05-14 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this
+ opcode.
+
+ svn path=/trunk/mono/; revision=60695
+
+commit 3b274ddc5c946640a4c0d6a7b2dee13cd2f5096d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Apr 21 14:51:24 2006 +0000
+
+ 2006-04-21 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old
+ behaviour.
+
+ svn path=/trunk/mono/; revision=59758
+
+commit e830aadb2febf62051b8fc162884a909087cfe4e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Apr 12 19:02:09 2006 +0000
+
+ 2006-04-12 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro.
+
+ svn path=/trunk/mono/; revision=59415
+
+commit a65cd014e420a38b47e00f5c6f9ce590fc00987b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 4 13:18:49 2006 +0000
+
+ 2006-04-04 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the
+ interpreter.
+
+ svn path=/trunk/mono/; revision=59009
+
+commit 0d566f3cb37ddf731fba6cfce9741e2224a13d77
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Mar 13 22:03:39 2006 +0000
+
+ * s390x-codegen.h: Fix immediate checks.
+
+ svn path=/trunk/mono/; revision=57914
+
+commit 15bc8b574c91bfaa40cd1d83374d0179148b5894
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jan 6 18:52:21 2006 +0000
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+ * mini-s390x.c, inssel-s390x.brg, cpu-s390x.md: Fix ATOMIC_I8
+ operations. Provide initial support for OP_ABS.
+
+ svn path=/trunk/mono/; revision=55158
+
+commit 1092c74e7a468b7761df92c2dc0dd2f2b49f21e6
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 3 19:40:34 2006 +0000
+
+ * mono/io-layer/ChangeLog, mono/io-layer/atomic.h, mono/mini/mini-s390x.c,
+ mono/mini/mini-s390x.h, mono/mini/exceptions-s390x.c,
+ mono/mini/ChangeLog, mono/mini/s390-abi.cs, mono/mini/tramp-s390x.c,
+ mono/mini/inssel-s390x.brg, mono/mini/cpu-s390x.md, mono/mini/mini-codegen.c
+ mono/mini/basic-long.cs, mono/mini/Makefile.am, mono/arch/s390x/ChangeLog
+ mono/arch/s390x/s390x-codegen.h: 64-bit s390 support
+
+ svn path=/trunk/mono/; revision=55020
+
+commit 417b7fbe8f810e8fd62b2cb805164a3b80a536d6
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Dec 22 20:18:18 2005 +0000
+
+ 2005-12-22 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_membar): Add membar instruction.
+
+ svn path=/trunk/mono/; revision=54750
+
+commit 259b4749eaf68bfd6818ab38df91e37239c5dd45
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 13 19:12:20 2005 +0000
+
+ Continuing to bring s390 up to current levels
+
+ svn path=/trunk/mono/; revision=54312
+
+commit f5fc186c01c764705e303b3783bf06e507e54640
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Dec 13 13:57:51 2005 +0000
+
+ Avoid lvalue pointer casts.
+
+ svn path=/trunk/mono/; revision=54279
+
+commit ab97bc8d9e311f447d9f4a78e5a28ef6ff9b82ad
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 30 18:06:59 2005 +0000
+
+ 2005-10-30 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_m17): Fix a warning.
+
+ svn path=/trunk/mono/; revision=52399
+
+commit bb6893fc1e1854a8c9f848dfbfbc2dd00bde8735
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 16 15:21:39 2005 +0000
+
+ 2005-10-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp.
+
+ svn path=/trunk/mono/; revision=51764
+
+commit 0b2d13a625bfd03f8d24538ef48870daed540ee3
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 21:25:31 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51443
+
+commit 2bba48015b516fd326cd082eb85325aa5b7676bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 20:36:01 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51434
+
+commit 749c9989f64683d8363481304647924ec1d910af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 27 13:25:16 2005 +0000
+
+ Another compilation fix.
+
+ svn path=/trunk/mono/; revision=50857
+
+commit 64dbeb6e048aa9654800624a74e9c58065cf01ea
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Sep 27 09:09:41 2005 +0000
+
+ * arm/dpiops.sh, arm/fpaops.h: Output to stdout.
+ * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix
+ for srcdir != builddir.
+
+ svn path=/trunk/mono/; revision=50833
+
+commit 7c363c19299d3f85ee7de0eec2a83108ea98eff2
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 26 08:58:47 2005 +0000
+
+ Compilation fix.
+
+ svn path=/trunk/mono/; revision=50748
+
+commit 541c387c65579ca75abe8cdb9d0725c1e6d90df1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Sep 11 16:55:41 2005 +0000
+
+ 2005-09-11 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro.
+
+ svn path=/trunk/mono/; revision=49910
+
+commit efbd8e41cf3337d59812a7cca48df3caee116b07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 10 20:50:37 2005 +0000
+
+ 2005-09-10 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions.
+ Integrate emission of unwind directives into the assembly macros.
+
+ svn path=/trunk/mono/; revision=49875
+
+commit 8b07d9836f60fee4ff83a14ce110921be8ef8f2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 3 22:06:10 2005 +0000
+
+ 2005-09-04 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_no_stop): New macro.
+
+ svn path=/trunk/mono/; revision=49399
+
+commit 4e89407a4a8dc38125a804df930515a31603cdca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 27 14:33:09 2005 +0000
+
+ 2005-08-27 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Fix some bugs.
+
+ * ia64/codegen.c: Update to work with latest ia64-codegen.h
+
+ svn path=/trunk/mono/; revision=48969
+
+commit 9a52b3ea85b1899c6cc23263eec6879841b3fd08
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 26 13:34:24 2005 +0000
+
+ 2005-08-26 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/Makefile.am: Distribute ia64-codegen.h.
+
+ svn path=/trunk/mono/; revision=48891
+
+commit 16291812e22e9750bf101e297fc573ce35bab382
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:58:33 2005 +0000
+
+ Oops
+
+ svn path=/trunk/mono/; revision=48874
+
+commit d4b1ea47e0395555276e1a6c8ddfa3800692b6ea
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:48:41 2005 +0000
+
+ Include files for 'make dist'
+
+ svn path=/trunk/mono/; revision=48871
+
+commit cac0da0afb2a782de1db55a000a2125531e757fd
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 20 22:16:11 2005 +0000
+
+ 2005-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs.
+
+ svn path=/trunk/mono/; revision=48614
+
+commit d151f0e0b203a78ca99cab91d9df89ffe7728880
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Aug 17 20:28:30 2005 +0000
+
+ 2005-08-17 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add dependency information for all instructions.
+
+ svn path=/trunk/mono/; revision=48476
+
+commit f1bce593b3504a82fc344d696eeedd91c39bcfee
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 4 18:51:34 2005 +0000
+
+ Uncommitted fixes.
+
+ svn path=/trunk/mono/; revision=48015
+
+commit 8348805e278d70da207455a0fe5cd470b00f3d8d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 30 15:43:43 2005 +0000
+
+ 2005-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47855
+
+commit 0fb75c64cb1361cc81a4e47ca556a597b440d65a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 20 16:55:20 2005 +0000
+
+ Wed Jul 20 18:01:54 BST 2005 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: more codegen macros.
+
+
+ svn path=/trunk/mono/; revision=47473
+
+commit 2205bab6932e69490e48b9e11957041e938020ee
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 18 20:33:37 2005 +0000
+
+ 2005-07-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47395
+
+commit 5a9a7537801ad68c0f8552e7e107994b793e93ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 22 22:00:43 2005 +0000
+
+ 2005-06-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add some new pseudo ops.
+
+ svn path=/trunk/mono/; revision=46401
+
+commit f51b94e34b1a887304ace96af27d51b4ec98ab4b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 19 20:18:07 2005 +0000
+
+ 2005-06-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Fix encoding of ia64_fclass.
+
+ svn path=/trunk/mono/; revision=46224
+
+commit 398224a9101808c8ca470b24366a506eeefec135
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 12 20:41:05 2005 +0000
+
+ 2005-06-12 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45834
+
+commit 5a9f032072053d76af233b9906614ee491d6295c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 9 20:22:08 2005 +0000
+
+ 2005-06-09 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45719
+
+commit 5f3ca7841b8aedd35f0c23781f2ac96f31ed501e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon May 30 14:09:48 2005 +0000
+
+ 2005-05-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/codegen.c: Fix it after latest changes.
+
+ svn path=/trunk/mono/; revision=45192
+
+commit d6844049f8659741b3afe9fa66136738107d28ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 14:24:56 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45159
+
+commit 4be6ea9e269927e9fbf06b0b73f53fef311f569f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 11:16:27 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45157
+
+commit 7b483f1f48c7abc9d0c17a1fb34b30ddaa7058bb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 18:02:41 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45147
+
+commit e360150e81b841b0644b5adc604f22f4b71e3987
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 17:08:04 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45145
+
+commit a781c3a65727b60386604adc6023f3f5a53b3e3e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 27 21:41:59 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45127
+
+commit 20c2fc7ba73ffaf5506ab9bf487c3f519de5067f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 26 17:16:50 2005 +0000
+
+ 2005-05-26 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45064
+
+commit f37723d307325b539fc515774d3988e0c7ff7a14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 18:25:06 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44892
+
+commit 1d1c3f56953c0cb26c2e695b468ea1da368aaef0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 13:31:28 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44888
+
+commit e32454dae1a3679056fb4ac86ffc81defc3a5eb7
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 01:29:00 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44883
+
+commit fee3f0247077513ba3254ddb410687a11c667b8c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 20 21:55:37 2005 +0000
+
+ 2005-05-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44855
+
+commit 1d94e7499dc18c3882f4aa16e977ceeaacddd466
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 23:02:39 2005 +0000
+
+ 2005-05-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work.
+
+ svn path=/trunk/mono/; revision=44722
+
+commit 3f053b86a49d8c41d47ca2ff771bda64ee5a5ddc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 18:55:54 2005 +0000
+
+ 2005-05-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter.
+
+ svn path=/trunk/mono/; revision=44705
+
+commit 061e9ab4d483c98d6747caad5160bd30fbbf09ab
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 19:52:56 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * Makefile.am: Only compile libmonoarch if the interpreter is compiled.
+
+ svn path=/trunk/mono/; revision=44526
+
+commit 82a68f6e85fbc7aaa7832584b2f51953871f1390
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 17:35:42 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add IA64 code generation macros.
+
+ * Makefile.am: Add ia64 subdir.
+
+ svn path=/trunk/mono/; revision=44523
+
+commit 800d43a2433ffc57d904687fdd2b746d5277cab5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 5 12:13:33 2005 +0000
+
+ 2005-05-05 Zoltan Varga <vargaz@freemail.hu>
+
+ * alpha/tramp.c: Applied patch from Jakub Bogusz <qboosh@pld-linux.org>.
+
+ svn path=/trunk/mono/; revision=44078
+
+commit 293459dd29bdd85542f499e0530c9504ced01604
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 28 21:09:11 2005 +0000
+
+ 2005-03-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Avoid emitting a rex in some places.
+
+ svn path=/trunk/mono/; revision=42316
+
+commit 140d5636edd892a388da877b7035f1809590e7ff
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 19:47:29 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the
+ byte registers.
+
+ svn path=/trunk/mono/; revision=41848
+
+commit 242ec30220c85e3f69a1dd1d50469771c4ba7047
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 17:08:39 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro.
+
+ svn path=/trunk/mono/; revision=41842
+
+commit f7074904827b639bb500dcb92c481ec9f35a88a0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 14 15:17:54 2005 +0000
+
+ 2005-03-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add missing AMD64_XMM7.
+
+ svn path=/trunk/mono/; revision=41795
+
+commit d23ce2f6ba82d598af825e20b95cf7938ff5bc39
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 13 16:57:42 2005 +0000
+
+ 2005-03-13 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Remove some unnecessary REXes.
+
+ svn path=/trunk/mono/; revision=41765
+
+commit ad5014de38c4bde6ef12a04bbbcdf0303ac8acc1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 11:11:38 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size
+ variants to some sse2 macros.
+
+ svn path=/trunk/mono/; revision=41557
+
+commit ee4c2805588b6d8291ac4349a520ca9c99050b59
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 09:28:19 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert
+ to a 64 bit value.
+
+ svn path=/trunk/mono/; revision=41554
+
+commit 3c4a8677815d2ad4e0b47b809ca16b43f33e3f96
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 6 21:25:22 2005 +0000
+
+ 2005-03-06 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add some SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=41491
+
+commit b175669d7abc2f7e83940305cf2cb1f7663569b0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 18:48:25 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add xadd instructions.
+
+ svn path=/trunk/mono/; revision=40956
+
+commit c7a5bc7b7055832a36dc63ba67ad7add33a95d06
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 14:16:51 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex.
+
+ svn path=/trunk/mono/; revision=40934
+
+commit 2cf88a5c39f13e54cc5e5f95ab6021924077c1d8
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Feb 16 04:43:00 2005 +0000
+
+ remove .cvsignore, as this is not used anymore
+
+ svn path=/trunk/mono/; revision=40731
+
+commit 0c1ce771e696eabde58e35deb64c0b578be7a92d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jan 10 21:13:14 2005 +0000
+
+ - Fix atomic ops on s390
+ - Implement OP_ATOMIC_xxx operations on s390
+ - Standardize exception handling on s390 with other platforms
+ - Enable out of line bblock support
+ - Check vtable slot belongs to domain when building trampoline
+
+ svn path=/trunk/mono/; revision=38647
+
+commit 9f3d964963eac63f42db702fe80cbfa89e3a73b4
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Mon Dec 13 06:05:53 2004 +0000
+
+ remove svn:executable from *.cs *.c *.h
+
+ svn path=/trunk/mono/; revision=37682
+
+commit c7b8d172d479d75da8d183f9491e4651bbc5b4f7
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 7 04:18:03 2004 +0000
+
+ Fix atomic operations and add initial TLS support.
+
+ svn path=/trunk/mono/; revision=37284
+
+commit c523c66bf11c9c05df3d77d42f8be9821ad558e5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 25 13:32:53 2004 +0000
+
+ 2004-11-25 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Updates to support the PIC changes.
+
+ svn path=/trunk/mono/; revision=36549
+
+commit da4b0970bffc8f281679bddf7371679910d0a23c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 19 15:04:41 2004 +0000
+
+ Fri Nov 19 17:29:22 CET 2004 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: counter reg decrement branch values
+ (patch by Geoff Norton <gnorton@customerdna.com>).
+
+
+ svn path=/trunk/mono/; revision=36320
+
+commit 3e56873e56ee01f0195683a20bd44e0fd03db4ee
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Thu Nov 18 18:44:57 2004 +0000
+
+ 2004-11-16 Patrik Torstensson <patrik.torstensson@gmail.com>
+
+ * x86/x86-codegen.h: added opcodes for xadd instructions
+
+
+ svn path=/trunk/mono/; revision=36283
+
+commit 59c3726af38156a306a67c2dd6e755e8bdd0d89a
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Nov 17 03:05:28 2004 +0000
+
+ Add support for siginfo_t as a parameter to mono_arch_is_int_overflow. Support this
+ routine in s390.
+
+ svn path=/trunk/mono/; revision=36188
+
+commit 149905478e1af4189a0cd9cf3f0e294dbb2bccbc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Nov 15 19:00:05 2004 +0000
+
+ 2004-11-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/x86-64-codegen.h: Get rid of this.
+
+ svn path=/trunk/mono/; revision=36145
+
+commit b982bf7e3e3e98afa37544b4a197d406f00b5e5a
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Nov 8 03:19:16 2004 +0000
+
+ fix
+
+ svn path=/trunk/mono/; revision=35803
+
+commit 4c5436f259d4a109ab352f2ec7b7891cdce76cc9
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Sep 6 15:07:37 2004 +0000
+
+ fix warning
+
+ svn path=/trunk/mono/; revision=33415
+
+commit 3a8f0a20bd939db788d3fd871b4c0ca37a4d0f96
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Sep 1 01:04:04 2004 +0000
+
+ Support short forms of push imm
+
+ svn path=/trunk/mono/; revision=33128
+
+commit e11c33f0ae258eb62dd5fc2e4c6ce12952d25233
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 21:04:04 2004 +0000
+
+ 2004-08-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX
+ generation.
+
+ svn path=/trunk/mono/; revision=33003
+
+commit b0791969d5ddbcb465d86bcd42c86150f653a9a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 11:11:38 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: More SSE work.
+
+ svn path=/trunk/mono/; revision=32992
+
+commit 8ca359bb4894521802e1f2044ec55a9aada4c08e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 09:41:22 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=32991
+
+commit 39a59671ff853ab672d9db1c982093ee1c7cc1f8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 21 20:07:37 2004 +0000
+
+ 2004-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG
+ since under amd64, all 16 registers have a low part.
+
+ svn path=/trunk/mono/; revision=32632
+
+commit c6a18db1cda9d62eaba7e1095f34eb84e7c39a8b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Aug 16 12:58:06 2004 +0000
+
+ 2004-08-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/x86-codegen.h: Add macros for accessing the mod/rm byte.
+
+ svn path=/trunk/mono/; revision=32365
+
+commit 7f2d7df98341055eaf370855c499508599770dec
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Sat Aug 14 18:28:26 2004 +0000
+
+ hush cvs
+
+ svn path=/trunk/mono/; revision=32344
+
+commit ee4209b85e88e6adfc07a057b41747607235805c
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Aug 6 16:28:23 2004 +0000
+
+ Support the MEMCPY(base, base) rule and add initial ARGLIST support
+
+ svn path=/trunk/mono/; revision=31985
+
+commit ee8712fd77bdd445d98c511a07f29b5136368201
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Aug 5 23:28:29 2004 +0000
+
+ Add s390x
+
+ svn path=/trunk/mono/; revision=31966
+
+commit 17467e9a25e9a1cf71c170fd85e042a5a11a0f05
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 20:43:11 2004 +0000
+
+ Further 64-bit S/390 updates
+
+ svn path=/trunk/mono/; revision=31898
+
+commit 4ad821169050e70979e71bbd5229557570059139
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 02:54:52 2004 +0000
+
+ S/390 64-bit support
+ tailc processing fix for S/390 32-bit
+
+ svn path=/trunk/mono/; revision=31840
+
+commit 5ebecc33aca9878d2071c8766e5741cd6434d676
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 23:11:29 2004 +0000
+
+ Add some s390 specific tests
+
+ svn path=/trunk/mono/; revision=31690
+
+commit 4e44c97a16962680e5009c97c0022e10ddbbad30
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 18:23:23 2004 +0000
+
+ Optimize code generation macros and standardize
+
+ svn path=/trunk/mono/; revision=31683
+
+commit 57ac232b2805d02a4e2b6322ed9532313337e56c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 30 16:01:49 2004 +0000
+
+ 2004-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31664
+
+commit 128d13d3973f07f5afba3ac7022bd9a4e7550626
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Thu Jul 29 17:10:53 2004 +0000
+
+ 2004-07-29 Ben Maurer <bmaurer@ximian.com>
+
+ * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm
+
+ svn path=/trunk/mono/; revision=31622
+
+commit 77b5d5d9a5c508cef6a93be733818c446b9fe12c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 28 20:14:03 2004 +0000
+
+ 2004-07-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31586
+
+commit a451b99d1a51fe3ffa7334ffbe6865f388e549c0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 24 18:29:32 2004 +0000
+
+ 2004-07-24 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31431
+
+commit b58d4fba4fad9c9cd52604adf39ffe578e407b14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 20:05:59 2004 +0000
+
+ 2004-07-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31426
+
+commit c7d11ced2179a38a406489b57f4a2f317fbe5da3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 16:07:08 2004 +0000
+
+ 2004-07-23 zovarga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31416
+
+commit f69c71790b01b62dd17d4479db005c3ef68e5e38
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 23:03:57 2004 +0000
+
+ Add mvcl instruction
+
+ svn path=/trunk/mono/; revision=31055
+
+commit c9c82671d87761dc9a06b78082402924cf8f540d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 12:05:08 2004 +0000
+
+ Add instructions to support enhanced memory-to-memory operations.
+
+ svn path=/trunk/mono/; revision=31039
+
+commit 08a92e1c00c0a0cf3c446257b446939062605260
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 30 15:04:48 2004 +0000
+
+ 2004-06-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add SPARC64 support.
+
+ svn path=/trunk/mono/; revision=30577
+
+commit d1881ea0cd90053526fa30405f4aeac90e06b485
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jun 18 20:03:01 2004 +0000
+
+ Fix broken ABI for stack parameters
+
+ svn path=/trunk/mono/; revision=29915
+
+commit 4e0bce5ca726ed3d2a33d6cfdc3b41b04fcb91f8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 17 16:25:19 2004 +0000
+
+ API cleanup fixes.
+
+ svn path=/trunk/mono/; revision=29787
+
+commit 1ac8bbc10c8f2cff9fe8aef20bee51612aa77f88
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 16 15:24:15 2004 +0000
+
+ Wed Jun 16 18:11:41 CEST 2004 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, *.c, *.h: more API cleanups.
+
+ svn path=/trunk/mono/; revision=29691
+
+commit cf789b0df2ab67298e712242ca201bd01d38c254
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 21 13:04:55 2004 +0000
+
+ More encoding fixes.
+
+ svn path=/trunk/mono/; revision=27820
+
+commit 47892f7ea09d90ff4385b3f9c3796d5ce80ee76d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon May 10 14:37:42 2004 +0000
+
+ Fix macros.
+
+ svn path=/trunk/mono/; revision=27028
+
+commit e85ff74df8db9dbeaa2f923b2d4b451fd84dcdc0
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Sat May 8 01:03:26 2004 +0000
+
+ 2004-05-07 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32
+
+ svn path=/trunk/mono/; revision=26957
+
+commit f4dcc4e46be455a7a289a969529ba4a1cd0bc3f3
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri May 7 19:53:40 2004 +0000
+
+ Bring s390 JIT up to date.
+
+ svn path=/trunk/mono/; revision=26943
+
+commit e79a83571f6126771c5e997560dd7e15c540df3f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Fri Apr 30 03:47:45 2004 +0000
+
+ 2004-04-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/tramp.c: use sizeof (stackval), fix
+ delegate tramp frame layout for Apple
+
+ svn path=/trunk/mono/; revision=26383
+
+commit f05e6864576c8c9e827cf6affbaff770732628d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 29 18:59:24 2004 +0000
+
+ Fix stmw opcode with signed offsets.
+
+ svn path=/trunk/mono/; revision=26328
+
+commit 92e3edf52f04c550767f3ae59c0f7fcefb46cbf8
+Author: Urs C. Muff <urs@mono-cvs.ximian.com>
+Date: Wed Apr 28 03:59:07 2004 +0000
+
+ cleanup
+
+ svn path=/trunk/mono/; revision=26114
+
+commit ab07311f8d1aeb258795fc72c5ed216f603db092
+Author: David Waite <david@alkaline-solutions.com>
+Date: Tue Apr 27 04:13:19 2004 +0000
+
+ 2004-04-26 David Waite <mass@akuma.org>
+
+ * unknown.c: modify to have content for defined platforms (to
+ avoid ISO C warning)
+
+ svn path=/trunk/mono/; revision=26036
+
+commit 9b84c8398a2558c61613ec50d3c3546627ac1e2d
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Apr 13 04:31:05 2004 +0000
+
+ ignores
+
+ svn path=/trunk/mono/; revision=25379
+
+commit 8adf42aeb550308e5a30e4308ad639fafa27e7e3
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:44:17 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h:
+ fix displacements in FP instrs
+
+ svn path=/trunk/mono/; revision=24755
+
+commit e82c4f6b16e7d3a7bdabe2df046b7ce17d91e716
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:18:11 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * amd64/tramp.c:
+ * arm/tramp.c:
+ * hppa/tramp.c:
+ * ppc/tramp.c:
+ * s390/tramp.c:
+ * sparc/tramp.c:
+ * x86/tramp.c:
+ remove child from MonoInvocation as it isn't used.
+
+ svn path=/trunk/mono/; revision=24751
+
+commit 73296dcd03106668c5db4511948983bdadeaee2f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 23 22:01:55 2004 +0000
+
+ 2004-03-23 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h: created
+
+ * hppa/tramp.c: changed style to be more like
+ other platforms.
+
+ * hppa/Makefile.am: add hppa-codegen.h
+
+ svn path=/trunk/mono/; revision=24504
+
+commit 6e46d909fa182adf4051e1a3c07bae63b93a2bc3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 16 19:22:52 2004 +0000
+
+ 2004-03-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add v9 branches with prediction.
+
+ svn path=/trunk/mono/; revision=24153
+
+commit 49a337364d8413d2528fe97e68f16ef610bb3c6a
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:20:03 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=24136
+
+commit ce4b3b024bba2c8bd4d874a75ef7aa23e118abf7
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:35 2004 +0000
+
+ Rename, since stupid cvs gets confused with the dash in x86-64
+
+ svn path=/trunk/mono/; revision=24134
+
+commit 01dc8bdaddab8f9b1c939716c36d13a35cf2494d
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:07 2004 +0000
+
+ Added back
+
+ svn path=/trunk/mono/; revision=24133
+
+commit a97ef493bb1e42b3afa548e47e3e14afe028b3ef
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:03:49 2004 +0000
+
+ Add x86-64
+
+ svn path=/trunk/mono/; revision=24131
+
+commit 25f79c5f1b26de4e7a413128d37731e1fcf09f14
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 16 00:02:55 2004 +0000
+
+ 2004-03-15 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg
+ so Sun's dis command recognizes it.
+
+ svn path=/trunk/mono/; revision=24084
+
+commit 38dd3d4c585c7e9cc116b7dfb5e89356c4d02da2
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 15 17:28:56 2004 +0000
+
+ 2004-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add some v9 instructions.
+
+ svn path=/trunk/mono/; revision=24050
+
+commit 36d64a0bbf92ca51335ddcb87627a8194f601820
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Mar 11 18:23:26 2004 +0000
+
+ 2004-03-11 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Ongoing sparc work.
+
+ svn path=/trunk/mono/; revision=23926
+
+commit 7e46377b331225994068d848d9ff8ceaeb96d38a
+Author: Duncan Mak <duncan@mono-cvs.ximian.com>
+Date: Mon Mar 8 01:47:03 2004 +0000
+
+ 2004-03-07 Duncan Mak <duncan@ximian.com>
+
+ * Makefile.am: Removed the reference to 'x86-64'. This was the cause
+ of the missing Mono daily tarballs, 'make dist' wasn't working.
+
+ We do have an 'amd64' directory, but it doesn't make it in 'make
+ dist'.
+
+ svn path=/trunk/mono/; revision=23784
+
+commit 94156ea640c77f37c64332acd21adf4170ecb67b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sat Feb 28 15:53:18 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=23562
+
+commit c2492eb99fe2c3e148a8dc629cc283fafad7af7c
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:30 2004 +0000
+
+ Remove amd64
+
+ svn path=/trunk/mono/; revision=23540
+
+commit c58af24e593b96f1ccc7819ab100063aa4db3c54
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:17 2004 +0000
+
+ Add x86-64 directory
+
+ svn path=/trunk/mono/; revision=23539
+
+commit 7fd6186b66f081ef6c0fca7708ddf8a641a09eae
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Feb 24 18:01:50 2004 +0000
+
+ Add amd64 support patch from Zalman Stern
+
+ svn path=/trunk/mono/; revision=23411
+
+commit 5d0cafa77c2cd95cb92a2990184bac64ec287016
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:14:37 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones.
+
+ svn path=/trunk/mono/; revision=23248
+
+commit f9f3c20b070f92bcf6f85f5bd68a24c3434fe6c4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:13:23 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Fix alignment of structures containing doubles.
+
+ svn path=/trunk/mono/; revision=23247
+
+commit bb16201aaa018434f551c2657d9e38f28dfe8904
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 2 15:56:15 2004 +0000
+
+ 2004-02-02 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Implement all floating point argument passing conventions in
+ Sparc V8. Also fix structure passing in V8.
+
+ svn path=/trunk/mono/; revision=22704
+
+commit 66607f84556593e2c3aa39bba418801193b6fddf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sun Jan 18 18:00:40 2004 +0000
+
+ Apply patches from Neale Ferguson for s390 support
+
+ svn path=/trunk/mono/; revision=22226
+
+commit 963e1b962894e9b434a2e80e63394bd0d34e68b8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 3 21:42:37 2004 +0000
+
+ Codegen macros for mips.
+
+ svn path=/trunk/mono/; revision=21658
+
+commit 7e4789fdfc87f75e63612fe0aca1f66d76134ba9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Dec 3 16:48:07 2003 +0000
+
+ Typo fix.
+
+ svn path=/trunk/mono/; revision=20745
+
+commit 96651158bf48aa1c31b5f2e3ca4cbf904211b1dc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Nov 13 15:23:48 2003 +0000
+
+ Thu Nov 13 16:24:29 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct.
+
+ svn path=/trunk/mono/; revision=19938
+
+commit ebebe8e4565897dfaad69911c88f4dda134d4b84
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 31 13:03:36 2003 +0000
+
+ 2003-10-31 Zoltan Varga <vargaz@freemail.hu>
+
+ * */tramp.c (mono_create_method_pointer): Rename to
+ mono_arch_create_method_pointer, move common code to a new function in
+ interp.c.
+
+ * */tramp.c (mono_create_trampoline): Rename to
+ mono_arch_create_trampoline for consistency.
+
+ svn path=/trunk/mono/; revision=19500
+
+commit c41c989929efaf77826634392c8ce9c54525809d
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Oct 14 05:17:17 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * x86/tramp.c: restore EDX after memcpy call
+
+ svn path=/trunk/mono/; revision=19024
+
+commit e4f9a75ed58f5ca214a685041f2a538e2f40fe1f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:56:37 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * Makefile.am: add hppa subdir
+
+ svn path=/trunk/mono/; revision=18999
+
+commit fa30eb232e53c9e39eec1bd44189e8ac29ba1644
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:48:11 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/tramp.c: add initial implementation - this is 64 bit only
+ hppa/Makefile.am hppa/.cvsignore: added
+
+ svn path=/trunk/mono/; revision=18996
+
+commit 0b0945abf1e873f6a8dfb527236d8cce2ce15574
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:38:25 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation
+ for V9 (64 bit), cover more 32 bit cases as well.
+
+ svn path=/trunk/mono/; revision=18995
+
+commit 6519bafeae686f3b32870a17dc1c84ae90ec95f9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 3 08:10:57 2003 +0000
+
+ 2003-09-03 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17839
+
+commit 935c93eeaff3ad8ccee032ade3584a7f6ab8f4a1
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Aug 25 13:38:19 2003 +0000
+
+ .cvsignore update
+
+ svn path=/trunk/mono/; revision=17581
+
+commit 0fed0582997210e2a0ac71a527dbd319a85aebcb
+Author: ct <ct@localhost>
+Date: Sun Aug 24 22:49:45 2003 +0000
+
+ completed the set of floating point ops
+
+ svn path=/trunk/mono/; revision=17564
+
+commit 3d0f6d935e3a9c180d0bbb14fc371d40e53b7872
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Aug 21 15:23:31 2003 +0000
+
+ 2003-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17470
+
+commit ed628ad0776db600fab8d5e4bcd6b563f5e808fd
+Author: ct <ct@localhost>
+Date: Tue Aug 19 03:04:34 2003 +0000
+
+ added more asm macros for floating point subtraction of single/double/quad
+
+ svn path=/trunk/mono/; revision=17394
+
+commit 6260d65a087be486df039c80eba92e44eb7a220d
+Author: ct <ct@localhost>
+Date: Tue Aug 19 02:53:23 2003 +0000
+
+ added floating point instructions for adding double, single, and quad numbers
+
+ svn path=/trunk/mono/; revision=17393
+
+commit c750ad8fea95e1fc81150e516ee26fbe79ab570d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 7 14:13:05 2003 +0000
+
+ Fixed imm16 range check.
+
+ svn path=/trunk/mono/; revision=17157
+
+commit ebc38557433accd79fce2e38dff0505dfded5691
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jul 31 14:32:42 2003 +0000
+
+ Thu Jul 31 16:19:07 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in, etc.: portability fixes and support for
+ building outside the srcdir from Laurent Morichetti <l_m@pacbell.net>.
+
+ svn path=/trunk/mono/; revision=16937
+
+commit 6e851a87092161092c6e8f06f4de13fb45bc04a6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jul 1 11:12:47 2003 +0000
+
+ Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us).
+
+ svn path=/trunk/mono/; revision=15809
+
+commit c439e3df5cfa7c67d976258228cb9188a218c21d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 25 13:18:00 2003 +0000
+
+ FP control word enum.
+
+ svn path=/trunk/mono/; revision=15623
+
+commit 2ad34b0dc225bf0b2efeea63c2f9287a1dbad162
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jun 9 18:28:54 2003 +0000
+
+ Small updates.
+
+ svn path=/trunk/mono/; revision=15250
+
+commit df86960d595f0284a453fe3fc67687b707148dbf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed May 21 17:57:05 2003 +0000
+
+ Some fixes and more complete support.
+
+ svn path=/trunk/mono/; revision=14769
+
+commit 3af153bd53728da9da9215141b1341d60b447bd3
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed May 21 12:45:22 2003 +0000
+
+ 2003-05-21 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): don't allocate
+ I1 to registers because there is no simple way to sign-extend 8-bit
+ quantities in caller-saved registers on x86.
+
+ * inssel-float.brg: set costs of some rules to 2 so
+ that monobure always select the arch. specific ones if supplied,
+ regardless of the order we pass the files to monoburg.
+
+ svn path=/trunk/mono/; revision=14757
+
+commit c4eeb3dfdd19546fb0712e5306d8d96a9a07580e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 20 10:44:31 2003 +0000
+
+ 2003-05-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): allocate 8/16
+ bit values to registers
+
+ svn path=/trunk/mono/; revision=14720
+
+commit 3a48ea89b161b268bb74f013cc36f6aec59e550b
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Thu May 1 23:42:01 2003 +0000
+
+ * tramp.c (mono_create_trampoline): tiny register allocation fix for reference types
+
+ svn path=/trunk/mono/; revision=14195
+
+commit 7595b109642f29ffe0cf8bb3e4411243b92a606f
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Sun Apr 27 16:04:54 2003 +0000
+
+ * tramp.c (alloc_code_buff): posix memory protection.
+ (mono_create_trampoline): new string marshaling + minor fixes.
+ (mono_create_method_pointer): delegates fix.
+
+ svn path=/trunk/mono/; revision=14046
+
+commit dfe276d1e1d116b113a639eecbc14c3661af5462
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:50:16 2003 +0000
+
+ arm-WMMX.h: initial WirelessMMX support for ARM codegen;
+
+ svn path=/trunk/mono/; revision=14044
+
+commit 27eb0661916c7c65b43def99be92895c61f4d315
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:47:57 2003 +0000
+
+ * ARM codegen update;
+
+ svn path=/trunk/mono/; revision=14043
+
+commit e1b54daadf68eef0608ac03bd6fe4dc374d78675
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Apr 27 11:40:11 2003 +0000
+
+ Make the debugging output off by default.
+
+ svn path=/trunk/mono/; revision=14039
+
+commit e679a120b848ea9e35e7c8a38ca3e03a386371c7
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Fri Feb 14 10:01:29 2003 +0000
+
+ 2003-02-14 Patrik Torstensson
+
+ * x86-codegen.h: Added fstsw op code for getting fp flags
+
+ svn path=/trunk/mono/; revision=11577
+
+commit f468e62377dfe3079f5b2bade1f43d239842e381
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Feb 1 10:02:52 2003 +0000
+
+ Sat Feb 1 10:59:31 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: update from Laramie.
+
+ svn path=/trunk/mono/; revision=11090
+
+commit cc3953655f65398b40e11fdcc97b1ae47bebfdc1
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 27 11:54:14 2003 +0000
+
+ Mon Jan 27 12:49:10 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: start of the port to the alpha architecture by
+ Laramie Leavitt (<lar@leavitt.us>).
+
+ svn path=/trunk/mono/; revision=10942
+
+commit 898dd64bddf69974ae9a22d6aa0ce9625fc9a5a0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jan 21 16:33:33 2003 +0000
+
+ Tue Jan 21 17:29:53 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: completed ppc native code generation by
+ Taylor Christopher P <ct@cs.clemson.edu>.
+
+ svn path=/trunk/mono/; revision=10778
+
+commit d2321af1b58b2fbb84c3b2cf3f6c7c7db0a787a4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jan 17 20:17:58 2003 +0000
+
+ Fri Jan 17 21:14:18 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/tramp.c: adapted to work for MacOSX (from a patch by
+ John Duncan).
+
+ svn path=/trunk/mono/; revision=10630
+
+commit 6d1b716753c1cc8a2f5c26338020941aa58ce9d7
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 15 15:21:26 2003 +0000
+
+ Update to the API change of a while ago.
+
+ svn path=/trunk/mono/; revision=10545
+
+commit d4f44103ed442b9a6e221b58b68550c1de4dfa2b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Nov 11 19:13:08 2002 +0000
+
+ Some debugging stubs.
+
+ svn path=/trunk/mono/; revision=8922
+
+commit b669ce7ac5106466cc6d57e9163ca5d6d80611aa
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 24 19:27:13 2002 +0000
+
+ s390 support from Neale Ferguson <Neale.Ferguson@SoftwareAG-USA.com>.
+
+ svn path=/trunk/mono/; revision=8521
+
+commit 457b666522f839e5e94e5fdda2284255b26d79a2
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Oct 7 03:36:50 2002 +0000
+
+ Fix some minor trampoline nags. Now down to 15 failed tests. Delegate code
+ still broken, if anyone wants to help fix it.
+
+ svn path=/trunk/mono/; revision=8041
+
+commit b6d66c3ac8ae39c47b99dd8b8a7813e6f60c47e7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Oct 3 15:30:05 2002 +0000
+
+ Changes to tramp.c. Pass more tests.
+
+ svn path=/trunk/mono/; revision=7966
+
+commit e5d299dd18e820d33cf1d74e0e2de53e163cc07b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 04:50:10 2002 +0000
+
+ Stupid off-by-one error fixed.
+
+ The problem was that I incremented gr as if we were on a PPC box. Sparc
+ doesn't need such "alignment" of the registers.
+
+ svn path=/trunk/mono/; revision=7800
+
+commit a9d8f44092c7c313efae893ff64306dc92985110
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 01:52:30 2002 +0000
+
+ arch/sparc/tramp.c: Fixed once again. Now works, mostly.
+ io-layer/atomic.h: It's sparc on gcc/solaris, and __sparc__ on gcc/linux.
+ had to add an #ifdef.
+
+ svn path=/trunk/mono/; revision=7798
+
+commit 0110bf4a5a435c5d60583887e0e0f28b7993a4cf
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Sep 23 02:25:43 2002 +0000
+
+ Starting rewrite of trampolining for SPARC. It needed some cleanup.
+
+ It doesn't work at all now. GO PROGRESS!
+
+ svn path=/trunk/mono/; revision=7728
+
+commit fe7d0f819c55d76f0cb7a54ba66d4368d40385bd
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Sep 19 18:30:56 2002 +0000
+
+ Beginning to add support for Solaris. Tested on Solaris 9.
+
+ Shared handles are still not working, will be addressed soon.
+
+ Trampoline code still broken, expect a rewrite.
+
+ svn path=/trunk/mono/; revision=7622
+
+commit 13eb9f4ebf45ffe17d555458cec8bbecefc71849
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 15:26:29 2002 +0000
+
+ retval value type fixed
+
+ svn path=/trunk/mono/; revision=7127
+
+commit 63315827a2ebc424954f4b8baf40497a5600ce7a
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 14:41:08 2002 +0000
+
+ fixed valuetypes marshaling in delegates
+
+ svn path=/trunk/mono/; revision=7126
+
+commit 82d4a3ff22ea8e8dfb9a3ec2be10657e7e25cd97
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Aug 24 23:54:12 2002 +0000
+
+ fixed struct marshaling, 108 tests pass now
+
+ svn path=/trunk/mono/; revision=7013
+
+commit b94511c33193dc728e039fa776bf3b9d5dad4e5b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 21 17:47:34 2002 +0000
+
+ fixed delegates
+
+ svn path=/trunk/mono/; revision=6862
+
+commit fafa1892b8b0315cab29de09f09f2aa5041b61a7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Tue Aug 20 15:03:07 2002 +0000
+
+ This nearly completes SPARC trampoline support for mint/mono. The delegate
+ code still needs some work.
+
+ There are bugs. Send crash reports, as well as .cs code and exe's to
+ crichton@gimp.org
+
+ Also, if anyone gets Bus Errors in the code, let me know as well, I've been
+ hunting down alignment bugs as well.
+
+ svn path=/trunk/mono/; revision=6812
+
+commit f8f8b65c484f48436941e4985cfb4b837cff4ceb
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 5 17:28:10 2002 +0000
+
+ Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix random memory read in mono_create_method_pointer.
+
+ svn path=/trunk/mono/; revision=6436
+
+commit dc11862f43a6240bcc35d2ef96fb04750c4bf930
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Aug 5 16:43:06 2002 +0000
+
+ x86-codegen.h: fixed bug in x86_memindex_emit, for basereg == EBP && disp == imm32;
+
+ svn path=/trunk/mono/; revision=6433
+
+commit 60179dd8c27bf3c080ca2c7db818c01a51c9d4b1
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Aug 5 09:53:43 2002 +0000
+
+ 2002-08-05 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): fixed stack_size bug
+
+ svn path=/trunk/mono/; revision=6408
+
+commit e13f4a98c6fe61ec768b0da9d8832814a313ed78
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:34:20 2002 +0000
+
+ more WIP
+
+ svn path=/trunk/mono/; revision=6363
+
+commit f73afba7e99de872e4e9d9dcf3c7c483632f6bc6
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:13:59 2002 +0000
+
+ more surgery
+
+ svn path=/trunk/mono/; revision=6360
+
+commit 347f6a854167fa5a26484b83736de86f5ffd8ea0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 17:55:44 2002 +0000
+
+ did quick surgery to update for Dietmar's new code
+
+ svn path=/trunk/mono/; revision=6359
+
+commit cc4396df6db395836340d26ad2f2d920f946729f
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Aug 2 07:13:54 2002 +0000
+
+ 2002-08-02 Dietmar Maurer <dietmar@ximian.com>
+
+ * marshal.c (mono_delegate_to_ftnptr): pass delegate->target
+ instead of the delegate itself as this pointer (bug #28383)
+
+ svn path=/trunk/mono/; revision=6348
+
+commit fbb833e1937ec3e3183bd1219e0f2391faa62718
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 14:17:18 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): also push the value type pointer for
+ methods returning value types.
+ (mono_create_method_pointer): support valuetype returns.
+
+ * interp.c (ves_pinvoke_method): do not call stackval_from_data if the result
+ is a value type.
+
+ svn path=/trunk/mono/; revision=6311
+
+commit 27a4251f2a6fd091ddc8084ad14a8808c136431d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 06:40:11 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (stackval_from_data): add pinvoke argument
+ (stackval_to_data): add pinvoke argument. We need to consider the
+ fact that unmanaged structures may have different sizes.
+
+ * x86/tramp.c (mono_create_method_pointer): allocate space for
+ value types.
+
+ svn path=/trunk/mono/; revision=6308
+
+commit 1be0ee94a17d2a4b7edb513d845d88ba5fed8285
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:53:19 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c: (mono_create_method_pointer): return method->addr for pinvoke methods
+
+ * interp.c (ves_exec_method): bug fix - directly jump to handle_exception.
+
+ svn path=/trunk/mono/; revision=6280
+
+commit 87f9fd554284e9d2037c8757a4211cf710a85ac0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:00:53 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c: use the new marshaling code. better delegate/remoting
+ support.
+
+ * debug-helpers.c (mono_method_full_name): only print a number to
+ indicate wrapper type (so that the output is more readable in traces).
+
+ * x86/tramp.c: remove code to handle PInvoke because this is no
+ longer needed.
+
+ svn path=/trunk/mono/; revision=6278
+
+commit ebf4ad275e84a3887798ac765bdc1f0ed457cd5a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 19 12:21:01 2002 +0000
+
+ Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix float loads. Simple delegate marshaling fix.
+
+ svn path=/trunk/mono/; revision=5909
+
+commit 2b677a332d7e811ca9cc75d271d069787f0495c1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jul 8 16:13:36 2002 +0000
+
+ 2002-07-08 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: marshaling for SZARRAY
+
+ svn path=/trunk/mono/; revision=5650
+
+commit ef9afb744f4679c465be380b4285928fff50db5e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Jul 6 01:41:14 2002 +0000
+
+ 2002-07-05 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: removed magic hack
+
+ svn path=/trunk/mono/; revision=5614
+
+commit 02476784232f22f91e347750c3fb8018d770d057
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jun 18 04:38:23 2002 +0000
+
+ Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal simple arrays correctly.
+
+ svn path=/trunk/mono/; revision=5316
+
+commit 5ff6eebba3bc5e1662b84a34a276d6842e41ab87
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jun 1 08:08:34 2002 +0000
+
+ Kill warning.
+
+ svn path=/trunk/mono/; revision=5075
+
+commit 0c268fdddc804751bba57401c02b139368f7a01c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 31 10:55:37 2002 +0000
+
+ Compilation fixes.
+
+ svn path=/trunk/mono/; revision=5054
+
+commit 9fe623bf5c85da9328f895680d8688987a94427e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 30 11:04:53 2002 +0000
+
+ 2002-05-30 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg (reg): bug fix in LOCALLOC
+
+ * mono.c (main): new switch --nointrinsic to disable memcpy opt.
+
+ * x86.brg: added block copy/init optimizations from
+ Serge (serge@wildwestsoftware.com)
+
+ svn path=/trunk/mono/; revision=5025
+
+commit 1b8d1ed7ce3e489dcf53cc2369a3d6d482d5901d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 28 12:23:00 2002 +0000
+
+ 2002-05-28 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: impl. CKFINITE
+
+ svn path=/trunk/mono/; revision=4988
+
+commit b0826d366f4f32c6ef772c0a9deef5a9b4157f0b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon May 27 22:56:15 2002 +0000
+
+ Updated copyright headers to the standard template
+
+ svn path=/trunk/mono/; revision=4975
+
+commit 027755140cf39776018e520f7cd838e319fb9a34
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 23 07:44:00 2002 +0000
+
+ 2002-05-23 Dietmar Maurer <dietmar@ximian.com>
+
+ * delegate.c: move the thread pool to metadata/threadpool.c, code
+ cleanup.
+
+ * threadpool.[ch]: impl. a threadpool that can
+ be used by mint and mono.
+
+ svn path=/trunk/mono/; revision=4875
+
+commit be70e94a20c2c1864f829122085bce03f24cc4e8
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed May 15 14:19:24 2002 +0000
+
+ fixed delegate return values
+
+ svn path=/trunk/mono/; revision=4662
+
+commit 89d436d12d5746d04d9f27d9897853f846d0500e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 19:00:42 2002 +0000
+
+ 2002-05-13 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (emit_save_parameters): fix I8 parameters
+
+ svn path=/trunk/mono/; revision=4601
+
+commit 8e8d0cf9ac1f4aa46da775bed8da214581345ddb
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 17:24:04 2002 +0000
+
+ introduced DEBUG, disabled by default
+
+ svn path=/trunk/mono/; revision=4599
+
+commit 8d20a830d50aaf3f30869283332d654472f16890
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Fri May 10 19:25:15 2002 +0000
+
+ * x86-codegen.h: renamed FP int macro for consistency (its arg is really a membase, not mem);
+
+ svn path=/trunk/mono/; revision=4500
+
+commit 9fb095d7866ee9963f11e3bd2dcc9b9930320ddc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri May 10 13:39:09 2002 +0000
+
+ updated for new strings
+
+ svn path=/trunk/mono/; revision=4484
+
+commit 5d0a1992c7fe0252457f6644198654d06ee7a19f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 10 07:24:08 2002 +0000
+
+ Fix checks in x86_patch().
+
+ svn path=/trunk/mono/; revision=4473
+
+commit 512203d918c6998f9652d23301b553c2bb205788
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:39:01 2002 +0000
+
+ Logged changes to x86-codegen.h
+
+ svn path=/trunk/mono/; revision=4344
+
+commit 9d1e2b5076d08bd02eb28ad8b3f2a27a42449250
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:33:54 2002 +0000
+
+ * x86-codegen.h: added missing shifts;
+ 8-bit ALU operations;
+ FPU ops with integer operand;
+ FIST (without pop);
+
+ svn path=/trunk/mono/; revision=4343
+
+commit 944736b70eb0689f094fe05c7184d36f7b7421bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 3 12:52:19 2002 +0000
+
+ Added some missing FP opcodes and made x86_patch() also handle the call opcode.
+
+ svn path=/trunk/mono/; revision=4252
+
+commit d8cf0bf0270efb923d7c6e80c4e5d547d1161740
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 29 12:14:39 2002 +0000
+
+ Removed mono_string_new_wrapper().
+
+ svn path=/trunk/mono/; revision=4151
+
+commit cc03dca33b721c5b46cba47ff7a7bb80b820be6d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 22 07:32:11 2002 +0000
+
+ Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added loop instructions and made x86_patch fully
+ useful.
+
+ svn path=/trunk/mono/; revision=3950
+
+commit ab877e78de2c3ac01664dc13c13c2f231fca4c11
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Apr 20 14:32:46 2002 +0000
+
+ 2002-04-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (ves_exec_method): support internalcall String constructors
+
+ svn path=/trunk/mono/; revision=3925
+
+commit d4ccb473cf835fd07294b7da6a6d4da9e2022dcd
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Apr 10 12:34:16 2002 +0000
+
+ Forgot to commit.
+
+ svn path=/trunk/mono/; revision=3740
+
+commit 9116ce23467ea863a99b860849d867802c32187a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Apr 6 10:40:58 2002 +0000
+
+ Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix advancement of argument position on the stack.
+
+ svn path=/trunk/mono/; revision=3652
+
+commit bf0fa05ecc5f3537597c10704414544c50d3a0ed
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 4 04:42:46 2002 +0000
+
+ Remove useless comments in rules.
+
+ svn path=/trunk/mono/; revision=3595
+
+commit 3f3f1e23c3cced2e37ec49361ee3236c524ed107
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Mar 30 11:19:26 2002 +0000
+
+ fixed compiler warnings
+
+ svn path=/trunk/mono/; revision=3514
+
+commit 793cfcbae98d4847ff08aff44ffa27020260c317
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Mar 16 14:37:28 2002 +0000
+
+ Sat Mar 16 19:12:57 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: increase default allocated size for trampolines
+ and assert on overflow.
+
+ svn path=/trunk/mono/; revision=3143
+
+commit af361d9d30702937e3cd9412b987552f4652887a
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Mar 14 09:52:53 2002 +0000
+
+ 2002-03-14 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_create_native_wrapper): new code to generate
+ wrappers for calling native functions.
+
+ * icall.c (ves_icall_InternalInvoke): impl.
+
+ svn path=/trunk/mono/; revision=3103
+
+commit 670be867554bb6f1ed61a17649e21d0e25f66105
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 11 11:24:33 2002 +0000
+
+ Mon Mar 11 16:14:29 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+    * x86/x86-codegen.h: added x86_clear_reg() and changed
+ x86_mov_reg_imm() to not check for imm == 0.
+
+ svn path=/trunk/mono/; revision=3051
+
+commit 51d24bbb570af055b885dfe9f06e7717e4bb3b98
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Feb 28 09:35:29 2002 +0000
+
+ impl. more CONV opcodes
+
+ svn path=/trunk/mono/; revision=2761
+
+commit d0370e0ab841b63f60170f3afcae9ee49e9faade
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Feb 28 07:43:49 2002 +0000
+
+ Thu Feb 28 12:34:21 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: start handling of more complex marshaling stuff.
+
+
+ Thu Feb 28 12:33:41 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * marshal.c, marshal.h: start of marshaling interface.
+
+ svn path=/trunk/mono/; revision=2759
+
+commit 29f73f5799fb9274a44c918cb4f63c606f765b96
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Wed Feb 27 09:12:27 2002 +0000
+
+ * Makefile.am: removed SCRIPT_SOURCES to fix automake issues.
+
+ svn path=/trunk/mono/; revision=2710
+
+commit a8b6a875977b2728019ea7cf2ea8dd432fe4469a
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:58:43 2002 +0000
+
+ * ChangeLog: ARM-related log entry.
+
+ svn path=/trunk/mono/; revision=2628
+
+commit f703ca24db3d380b37434e9f1cced6d0b45a5470
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:56:57 2002 +0000
+
+ * Makefile.am: added arm to DIST_SUBDIRS.
+
+ svn path=/trunk/mono/; revision=2627
+
+commit f107fb14e6c183972bec81e5727381f44c6a5333
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 20:46:13 2002 +0000
+
+ (mono_create_method_pointer): implements delegates with parameters
+ and return value
+
+ svn path=/trunk/mono/; revision=2618
+
+commit 2217d1a7da2572afd033b958454b9662c42022b9
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Feb 24 17:44:55 2002 +0000
+
+ * ARM support sources, initial check-in;
+
+ svn path=/trunk/mono/; revision=2615
+
+commit 56dde5e20e11f2d9d2a3522923a5a4729bed469f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 01:40:17 2002 +0000
+
+ 2002-02-24 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (mono_create_method_pointer): basic delegates
+ implementation, it works for simple delegates now and I am already
+    pretty close to having it working for all delegates, but I am
+ going to sleep and finish it tomorrow?
+
+ svn path=/trunk/mono/; revision=2611
+
+commit 0c4f3b00c8e831077c6ba1b28065e7be81bbff61
+Author: Jeffrey Stedfast <fejj@novell.com>
+Date: Fri Feb 22 19:43:09 2002 +0000
+
+ 2002-02-22 Jeffrey Stedfast <fejj@ximian.com>
+
+ * sparc/tramp.c (mono_create_trampoline): Much tinkering to get
+ the opcodes more correct. Still needs a lot of work.
+
+ svn path=/trunk/mono/; revision=2602
+
+commit 6bb3f7ead4ab8d574273f5bdacf32b29809ace80
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:57:29 2002 +0000
+
+ ops, fix return value passing
+
+ svn path=/trunk/mono/; revision=2526
+
+commit 725e90ef0e13752e357358ddef152a30beae174f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:50:13 2002 +0000
+
+ added stack saving for most arguments
+
+ svn path=/trunk/mono/; revision=2523
+
+commit 5dbc4bd3639f2d012a1103ae1b0f911768e460ab
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 19:49:10 2002 +0000
+
+ 2002-02-19 Radek Doulik <rodo@ximian.com>
+
+    * ppc/tramp.c (emit_save_parameters): don't start saving 64bit
+    values to even registers
+
+ svn path=/trunk/mono/; revision=2519
+
+commit e756cc154586ebdd6f4bba8b730fca09611874cf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Feb 19 15:40:57 2002 +0000
+
+ Tue Feb 19 20:19:38 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+    * x86/tramp.c: avoid pointer arithmetic (pointed out by Serge).
+
+
+ Tue Feb 19 20:20:15 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * dump.c: the prolog is before each arg in the custom attribute blob.
+
+ svn path=/trunk/mono/; revision=2513
+
+commit 1da21d342a98bedfc9295846080043d8946f4029
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 21:10:29 2002 +0000
+
+ la la la, ChangeLog entries
+
+ svn path=/trunk/mono/; revision=2463
+
+commit b7fa0baa6c15d3ee14a1b67dd5b56d21a931894b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 20:02:39 2002 +0000
+
+ (mono_string_new_wrapper): new helper function, cut&pasted from
+ x86, modified to check for NULL text to avoid branching in
+ generated code
+ (calculate_sizes): updated for string retval changes
+ (emit_call_and_store_retval): updated for string retval
+
+ svn path=/trunk/mono/; revision=2461
+
+commit 2cee2566ae50aa32e13864135260e16fd21bfac1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 19:41:12 2002 +0000
+
+ 2002-02-17 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: fixed minimal stack size, fixed string parameters,
+ fix byte and half word parameters
+
+ * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth
+
+ svn path=/trunk/mono/; revision=2460
+
+commit c6fd0cb7010239a29091a50aa5354e96f74bedf2
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 12:22:52 2002 +0000
+
+ added some docu
+
+ svn path=/trunk/mono/; revision=2372
+
+commit 6b6716c9eaa66549c9c1cf86934a54a830afc1b6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 08:29:02 2002 +0000
+
+ pass the domain to mono_string_new
+
+ svn path=/trunk/mono/; revision=2365
+
+commit 0ffc7e417ee15973120c4f3a0cb0f2732c5c6633
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Feb 11 22:48:46 2002 +0000
+
+ More
+
+ svn path=/trunk/mono/; revision=2341
+
+commit 6f7cdfa857058ee3662e1662190315c294188ae0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 13:49:06 2002 +0000
+
+ Mon Feb 11 18:40:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * sparc/*: sparc codegen header and some untested trampoline code.
+
+ svn path=/trunk/mono/; revision=2315
+
+commit d7a858a6ac5bc37435a157cf41eb63818905a7ea
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 07:42:10 2002 +0000
+
+ Mon Feb 11 12:32:35 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+    * x86/tramp.c: fix handling of multiple marshalled strings.
+ * x86/x86-codegen.h: some code to patch branch displacements.
+
+ svn path=/trunk/mono/; revision=2308
+
+commit dd029fa4245c99073ae6863dcb8e1560cc1eedc0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Feb 1 12:04:34 2002 +0000
+
+ SHR/SHL impl.
+
+ svn path=/trunk/mono/; revision=2224
+
+commit 4a977a50d70eb75760d9555854845d32595c4093
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Feb 1 11:22:35 2002 +0000
+
+ Fri Feb 1 16:03:53 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: exception fixes. Use mono_method_pointer_get ()
+    to ease porting to other archs. Some support for overflow detection.
+
+ Fri Feb 1 16:03:00 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get ().
+
+
+ Fri Feb 1 16:13:20 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: add asserts if we are ever going to scribble over memory.
+ * socket-io.c: not all systems have AF_IRDA defined.
+
+ svn path=/trunk/mono/; revision=2223
+
+commit 2d3dbc6213f3e12d1c7b332d80fec81384612bf8
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 24 01:00:53 2002 +0000
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): Do not try to create a
+ mono_string_new if the return value from the PInvoke code is
+ NULL.
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * genwrapper.pl: Added wrappers for the mono_glob functions.
+
+ * glob.c: New file, with globing functions used by the Directory
+ code.
+
+ svn path=/trunk/mono/; revision=2139
+
+commit 5291c24b937d193ef9861c87421bab87e0fcc4da
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jan 21 20:06:20 2002 +0000
+
+ ppc changes
+
+ svn path=/trunk/mono/; revision=2090
+
+commit b5472227702fc528149111f0c4406c9dadb9a9e0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 14 07:00:24 2002 +0000
+
+ Mon Jan 14 11:50:16 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added overflow condition code and some aliases
+ for the other ccs.
+
+ svn path=/trunk/mono/; revision=1968
+
+commit a18abcd00665e9bc660b90cf4c0bdf86456067af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jan 10 16:13:26 2002 +0000
+
+ Thu Jan 10 19:36:27 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: fix mono_class_from_mono_type () for szarray types.
+ Remove unused cache check in mono_class_from_type_spec().
+ * icall.c: *type_from_name () functions handle simple arrays and byref.
+ * reflection.c: handle byref and szarray types. Handle methods without
+ body (gets P/Invoke compilation working). Handle types and fields in
+ get_token ().
+ * reflection.h: add rank to MonoTypeInfo.
+
+
+ Thu Jan 10 20:59:59 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: add a flag to mono_create_trampoline ()
+ to handle runtime methods.
+
+
+ Thu Jan 10 21:01:08 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: mono_create_trampoline (): the runtime argument is
+    needed to handle delegates correctly; the previous change in handling
+ the string return type broke them.
+
+ svn path=/trunk/mono/; revision=1950
+
+commit 66990d65e3ac907fe24cc5411591759ce60472b0
+Author: Matt Kimball <mkimball@mono-cvs.ximian.com>
+Date: Wed Jan 9 01:49:12 2002 +0000
+
+ Tue Jan 8 22:38:41 MST 2002 Matt Kimball <matt@kimball.net>
+
+ * x86/tramp.c: handle strings returned from functions in external
+ libraries by converting to a Mono string object after the pinvoke'd
+ function returns
+
+ svn path=/trunk/mono/; revision=1923
+
+commit ba9f9e77bf38e3bb4b1a888d39c7b0aab8ae09bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 5 11:15:42 2002 +0000
+
+ Sat Jan 5 15:48:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * icall.c: hack to make IsSubType work for TypeBuilders.
+ * reflection.c: emit constructors before methods.
+ Retrieve param names in mono_param_get_objects().
+
+
+ Sat Jan 5 15:45:14 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: allow classname:method name in --debug argument.
+    Fix box opcode for valuetypes. Fix a few opcodes to take a 16 bit
+ index instead of 32 (stloc, ldloc, starg, etc.).
+
+
+ Sat Jan 5 15:51:06 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle short integer return types.
+
+ svn path=/trunk/mono/; revision=1852
+
+commit 0635ffef0b38bcf88cd3320939c1d96bf8bb8c0e
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 3 20:13:47 2002 +0000
+
+ Fix build for new automakes, seems to work
+
+ svn path=/trunk/mono/; revision=1795
+
+commit 054ebda213a85e3a8a1770ec5e63831e3a0f06ba
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 20 15:20:42 2001 +0000
+
+ Thu Dec 20 20:13:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix create_method_pointer() to pass the arguments
+ correctly and add check for overflow.
+
+ svn path=/trunk/mono/; revision=1656
+
+commit faaadc7132a2cdd8c13adf7fbb79d32461759493
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Dec 17 06:50:02 2001 +0000
+
+ 2001-12-16 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_handle_exception): new code to handle
+ exceptions inside unmanaged code.
+
+    * x86.brg: impl. SAVE_LMF, RESTORE_LMF, pass implicit valuetype
+ address as first argument.
+
+ * x86.brg: pass exceptions on the stack
+
+ * jit.h (ISSTRUCT): new macro to check for real value types
+ (return false for enum types).
+
+ * unicode.c (_wapi_unicode_to_utf8): byteswap UTF16 strings before
+ passing them to iconv
+
+ * file-io.c: raise exceptions if handle is invalid.
+
+ svn path=/trunk/mono/; revision=1603
+
+commit 35430229b14448182d84a7f9348995019251fb28
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 13 11:03:21 2001 +0000
+
+ Thu Dec 13 15:56:53 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: x86_mov_memindex_imm() added.
+
+ svn path=/trunk/mono/; revision=1565
+
+commit 813f9d5a9dcbe48c711bbb8bacc876e976ce0aea
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 21:23:53 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: use r12 which is volatile instead of non-volatile
+ r14 to avoid saving
+
+ svn path=/trunk/mono/; revision=1482
+
+commit 0a65eb2cf0b69f68849e7196b6e00133b3ecf3fc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 20:19:00 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS
+    to generate libmonoarch for ppc
+
+ svn path=/trunk/mono/; revision=1478
+
+commit c4f49a88d52479062bd8b95669cb90c1b86242d0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:48 2001 +0000
+
+ added test
+
+ svn path=/trunk/mono/; revision=1477
+
+commit 2c1c4889b99aaf4be0b894ea24b4d92201cb282d
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:19 2001 +0000
+
+ added files for initial ppc support
+
+ svn path=/trunk/mono/; revision=1476
+
+commit 719926a4c59c399767f10b9567859300a768b05a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Nov 27 10:30:39 2001 +0000
+
+ Tue Nov 27 15:24:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+    * x86/x86-codegen.c: x86_lea_memindex() added.
+
+ svn path=/trunk/mono/; revision=1447
+
+commit c4a26e54cfa29ea5279d1964ef4ea7f6176c0357
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 19 06:52:53 2001 +0000
+
+ Mon Nov 19 11:37:14 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: add mono_install_trampoline() so that the runtime
+ can register a function to create a trampoline: removes the ugly
+ requirement that a runtime needed to export arch_create_jit_trampoline.
+ * object.h, object.c: added mono_install_handler() so that the runtime
+    can install a handler for exceptions generated in C code (with
+ mono_raise_exception()). Added C struct for System.Delegate.
+ * pedump.c: removed arch_create_jit_trampoline.
+ * reflection.c: some cleanups to allow registering user strings and
+ later getting a token for methodrefs and fieldrefs before the assembly
+ is built.
+ * row-indexes.h: updates and fixes from the new ECMA specs.
+
+
+ Mon Nov 19 11:36:22 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * jit.c: use mono_install_trampoline (), instead of exporting
+ a function to a lower-level library.
+
+
+ Mon Nov 19 11:33:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: start adding support for handling exceptions across
+ managed/unmanaged boundaries. Cleanup Delegate method invocation.
+ Pass the correct target object in Delegate::Invoke and use the correct
+ 'this' pointer in ldvirtftn (bugs pointed out by Dietmar).
+
+ Mon Nov 19 11:32:28 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * main.c: remove arch_create_jit_trampoline().
+
+ svn path=/trunk/mono/; revision=1380
+
+commit af643d34335bfdc90a7455f99847e954456bb07d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 14 15:18:56 2001 +0000
+
+ Wed Nov 14 19:21:26 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean as a return value.
+    * x86/x86-codegen.c: x86_widen_memindex() added.
+
+
+ Wed Nov 14 19:23:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: move the stack frame dumping code to a function so it can
+ be called from the debugger. Fix virtual method lookup for interfaces.
+ Throw exceptions instead of aborting in more places.
+ Print also the message in an exception. Updates for field renames in
+ corlib.
+
+
+ Wed Nov 14 19:26:06 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.h, class.c: add a max_interface_id to MonoClass.
+ * icall.c: rename my_mono_new_object() to my_mono_new_mono_type()
+ since it's used to do that. Added mono_type_type_from_obj().
+ Make GetType() return NULL instead of segfaulting if the type was not
+ found. Handle simple arrays in assQualifiedName.
+ * object.h: add a struct to represent an Exception.
+ * reflection.c: output call convention in method signature.
+ Add code to support P/Invoke methods and fixed offsets for fields.
+
+ svn path=/trunk/mono/; revision=1352
+
+commit 041ab742894fbd6d90e2ffb3c6fddb60a869e952
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Nov 9 13:40:43 2001 +0000
+
+ 2001-11-09 Dietmar Maurer <dietmar@ximian.com>
+
+ * testjit.c (mono_analyze_stack): new BOX impl.
+
+ * x86.brg: implemented INITOBJ
+
+ * testjit.c (mono_analyze_stack): finished array support
+ (mono_analyze_stack): reimplemented DUP instruction
+
+ svn path=/trunk/mono/; revision=1308
+
+commit bff8e602354a8d32dfaed336600b5f648af06e70
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Nov 8 21:38:32 2001 +0000
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c: Include stdlib to kill warning.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * main.c (dis_property_methods): Added missing colon which avoided
+ setting loc.t
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * interp.c: Include stdlib to kill warning.
+ (check_corlib): Adjust format encodings to remove warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * reflection.c (build_compressed_metadata): Eliminates warnings
+ and uses 64-bit clean code.
+
+ * metadata.c (mono_type_hash): Change signature to eliminate warnings.
+ (mono_type_equal): Change signature to eliminate warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * monoburg.y: Include string.h, stdlib.h to kill warnings.
+
+ * sample.brg: Include string.h to remove warnings.
+
+ svn path=/trunk/mono/; revision=1298
+
+commit 306ec85b780f5f9c99ffaf19f51baa6548a298a6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Nov 7 06:33:48 2001 +0000
+
+ 2001-11-07 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (enter_method): print out all method arguments
+ (x86_magic_trampoline): impl.
+ (arch_create_simple_jit_trampoline): we use different trampolines
+ for static methods (no need to write the address back into to
+    for static methods (no need to write the address back into the
+
+ svn path=/trunk/mono/; revision=1278
+
+commit 689da148c801d119d0d2722ef74a497e95c5f1b3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 22 09:24:31 2001 +0000
+
+ Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean, u1 and i1 as return values.
+
+ svn path=/trunk/mono/; revision=1192
+
+commit f6b50c3852378ca35cef63056ddec70585b3ac32
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Oct 10 10:11:17 2001 +0000
+
+ Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added x86_set_{reg,mem,membase}.
+
+ svn path=/trunk/mono/; revision=1133
+
+commit 27043fee95be8bec691045d7ab39b1be553550e9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 8 14:33:48 2001 +0000
+
+ Mon Oct 8 20:27:50 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: define NO_UNALIGNED_ACCESS for platforms that
+ can't read on unaligned boundaries
+
+
+ Mon Oct 8 16:12:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: use MonoArrayType to describe the shape of an array.
+ Guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux).
+ * image.c: endian fixes by Laurent Rioux.
+ * object.h, object.c: rename MonoStringObject to MonoString and
+ MonoArrayObject to MonoArray. Change some function names to conform to
+ the style mono_<object>_<action>. mono_string_new_utf16 () takes a
+ guint16* as first argument, so don't use char*.
+ Provide macros to do the interesting things on arrays in a portable way.
+ * threads-pthread.c: updates for the API changes and #include <sched.h>
+ (required for sched_yield()).
+ * icall.c: updates for the API changes above.
+    * Makefile.am, mono-endian.c, mono-endian.h: include unaligned read routines for
+ platforms that need them.
+
+
+ Mon Oct 8 16:13:55 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+    * get.c, get.h: MonoArray changed to MonoArrayType.
+ * main.c: guard against calling bsearch with a NULL pointer
+ (pointed out by Laurent Rioux, smoux).
+
+
+ Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: remove mono_get_ansi_string () and use
+ mono_string_to_utf8 () instead.
+
+
+ Mon Oct 8 16:14:40 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: use the accessors provided in object.h to deal with
+ MonoArrays. Updates for API renames in metadata. Throw exception
+ in ldelema if index is out of bounds.
+
+ svn path=/trunk/mono/; revision=1122
+
+commit 4ff31b89c4d3458dc378cd2e915ed08281a21a8b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 4 13:32:23 2001 +0000
+
+ Thu Oct 4 19:10:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: MonoTypes stored in MonoClass are stored as
+ fundamental MonoTypes when the class represents a
+ fundamental type (System.Int32, ...).
+    The TypeHandle returned by ldtoken is a MonoType*.
+    * icall.c: ves_icall_get_data_chunk () writes out all the
+ PE/COFF stuff. Implement ves_icall_define_method (),
+ ves_icall_set_method_body (), ves_icall_type_from_handle ().
+ * image.c: properly skip unknown streams.
+ * loader.h, loader.c: add type_class to mono_defaults.
+ * metadata.c, metadata.h: export compute_size () as
+ mono_metadata_compute_size () with a better interface.
+ Typo and C&P fixes.
+ * pedump.c: don't try to print the entry point RVA if there is no entry point.
+ * reflection.c, reflection.h: many cleanups, fixes, output method
+ signatures and headers, typedef and typeref info, compress the metadata
+ tables, output all the heap streams, cli header etc.
+ * row-indexes.h: typo fixes.
+
+
+ Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: allow marshalling valuetypes if they are
+ 4 bytes long.
+
+
+ Thu Oct 4 19:05:56 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * dis-cil.c: fix printing of exception stuff.
+ * dump.c: display some more info in the typedef table dump.
+ * main.c: typo fix and method list fix.
+
+ svn path=/trunk/mono/; revision=1071
+
+commit 7328e9088acbd2609dff8d07b841c3fafd894d25
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 1 13:07:53 2001 +0000
+
+ Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment
+ and avoid a couple of unnecessary instructions.
+
+ svn path=/trunk/mono/; revision=1042
+
+commit 1fa26f9aa718559d3090d1c1275bf04d574368f0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 28 13:49:47 2001 +0000
+
+ Fri Sep 28 19:26:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c: fix type comparison for arrays.
+    * loader.h, loader.c: half-assed fix to get more tests to work in cygwin.
+ Added a couple of new classes to monodefaults.
+ * icall.c: added a couple of Reflection-related internalcalls.
+ * class.h, class.c: implemented mono_ldtoken () for RuntimeTypeHandles.
+ Added a byval_arg MonoType to MonoClass.
+
+
+ Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal valuetypes that are enums.
+
+
+ Fri Sep 28 19:37:46 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: Implemented ldtoken, conv.ovf.i. Use MonoClass->byval_arg
+ (and remove related kludges). Don't choke on access to arrays of
+ references. Throw an exception when an internalcall or P/Invoke
+    function doesn't have an implementation. Throw an EngineException
+ for unimplemented opcodes.
+
+ svn path=/trunk/mono/; revision=1027
+
+commit 0122a3ea04b06d1d51f2756e48f6392ccac1096d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 27 09:38:19 2001 +0000
+
+ Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG
+ as basereg.
+
+ svn path=/trunk/mono/; revision=995
+
+commit a5844f903a68e9448d7031587ffbd02ed2c4f486
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Sep 26 10:33:18 2001 +0000
+
+ Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added memindex addressing mode encoding
+ (and mov to/from register opcodes).
+
+ svn path=/trunk/mono/; revision=984
+
+commit 1f45df6d593cd60780ea121d08ddd035a3418e4a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 24 13:30:32 2001 +0000
+
+ Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: don't change a MONO_TYPE_STRING to a char*
+ when it's an argument to an internalcall.
+
+
+ Mon Sep 24 18:56:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * object.c, object.h: added mono_ldstr (), mono_string_is_interned () and
+ mono_string_intern () to implement the semantics of the ldstr opcode
+ and the interning of System.Strings.
+ * icall.c: provide hooks to make String::IsIntern and String::Intern
+ internalcalls.
+
+
+ Mon Sep 24 18:50:25 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: catch a few more error conditions with exceptions instead of
+ erroring out.
+ Don't use g_print() in stack traces because it doesn't work with
+ some float values.
+ When we call an instance method of a valuetype class, unbox the 'this'
+ argument if it is an object.
+ Use mono_ldstr () to implement the ldstr opcode: it takes care of
+ interning the string if necessary.
+ Implemented new opcodes: ckfinite, cgt.un, clt.un, ldvirtftn, ldarga.
+ Fixes to handle NaNs when comparing doubles.
+ Make sure the loaded assembly has an entry point defined.
+ Fixed portability bugs in neg and not opcodes.
+
+ svn path=/trunk/mono/; revision=943
+
+commit a995bd527db97e45d979a6b97e0a15a479d2e14b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Sep 23 07:49:26 2001 +0000
+
+ Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines.
+
+ svn path=/trunk/mono/; revision=927
+
+commit c9d21b14c718c8e7f3690f5d93ac349bbdd98d88
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Sep 21 12:50:46 2001 +0000
+
+ implemented more opcodes
+
+ svn path=/trunk/mono/; revision=916
+
+commit a0930b7dcd7fe845e1c3c06f3fba6736f88d8bf9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 20 15:31:50 2001 +0000
+
+ Thu Sep 20 16:32:42 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: implemented some more opcodes: calli, rem.un,
+ shr.un, conv.u, cpobj, stobj, conv.r.un, conv.ovf.i1.un,
+ conv.ovf.i2.un, conv.ovf.i4.un, conv.ovf.i8.un, conv.ovf.i.un,
+ conv.ovf.u1.un, conv.ovf.u2.un, conv.ovf.u4.un, conv.ovf.u8.un,
+ conv.ovf.u.un.
+ Fix some 64 bit issues in the array element access code and a small bug.
+ Throw an exception on index out of range instead of asserting.
+ Throw an exception on a NULL array instead of dying.
+ Stomped a memory corruption bug (.cctor methods were freed after
+    executing them, but they are stored in MonoClass now...).
+ Added a simple facility to invoke the debugger when a named
+ function is entered (use the cmdline option --debug method_name).
+ * interp.h: fix 64 bit issue.
+
+ svn path=/trunk/mono/; revision=904
+
+commit e177e60b93378860f0573f458d06cd641770a255
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 18 07:26:43 2001 +0000
+
+ Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: remove C++ comments.
+
+ svn path=/trunk/mono/; revision=865
+
+commit 4f874ee6ae2442c99421087b5ad11eae88283d55
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 09:10:44 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: emit real code for calls
+
+ * testjit.c (create_jit_trampoline): creates a function to trigger jit
+ compilation.
+ (mono_compile_method): reversed argument order
+
+ svn path=/trunk/mono/; revision=842
+
+commit 011e42b68518f5c1397ecdc0417c021b4c524560
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 07:18:11 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest
+
+ svn path=/trunk/mono/; revision=841
+
+commit c61474703f058c226a94ba9cdfb1d19e3a45eecd
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Sep 12 03:47:43 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=792
+
+commit db78bf2c09f07356fe4c8284d1a48fa9867bd2fc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 10 14:26:02 2001 +0000
+
+ Mon Sep 10 20:19:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: check for sizeof(void*) and for the architecture.
+
+ Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, x86/Makefile.am: conditional compile logic
+ to make porting to different targets easier.
+
+ Mon Sep 10 17:24:45 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: make it work for make distcheck.
+
+ Mon Sep 10 20:21:34 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * endian.h, assembly.c: fix some endianness issues.
+
+ Mon Sep 10 20:20:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: endian fixes, comments.
+
+ svn path=/trunk/mono/; revision=783
+
+commit ce34fcec9c53a31ba2cd48f22c9a5099d02779e5
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:34:11 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=781
+
+commit 6c07667b555ca78bdad5d7b6e5aa87f8078c1989
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:14:46 2001 +0000
+
+ added the jit prototype, small fixes
+
+ svn path=/trunk/mono/; revision=780
+
+commit 680963c46ae8b96cca52387e0f5b1a2e39825b90
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 7 12:53:34 2001 +0000
+
+ Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fixes and x86_mov_membase_imm ().
+ * x86/tramp.c: implemented mono_create_method_pointer ():
+ creates a native pointer to a method implementation that can be
+ used as a normal C callback.
+
+
+ Fri Sep 7 18:45:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: make ves_exec_method () and stackval_from_data ()
+ non static. Implement a couple of runtime methods needed to
+ use delegates (ves_runtime_method ()).
+ Implemented ldftn opcode.
+
+ svn path=/trunk/mono/; revision=745
+
+commit 4c39a186f2fa0dc3cca3ae6f6dc6584c75341adf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 6 09:46:03 2001 +0000
+
+ Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added x86_rdtsc() and fixes.
+ * x86/tramp.c: create trampolines to call pinvoke methods.
+ * x86/Makefile.am: create a libmonoarch convenience library.
+
+
+ Thu Sep 6 15:41:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: link to libmonoarch.
+ * interp.h, interp.c: use mono_create_trampoline ().
+ Pass the command line arguments to Main (String[]) methods.
+
+ svn path=/trunk/mono/; revision=728
+
+commit d3a5cf739f1182a42d20f1d5ace2a272307da87f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 27 03:43:09 2001 +0000
+
+ Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added.
+
+ svn path=/trunk/mono/; revision=636
+
+commit 231c25bd596aa45a2962a9c820fc9417985a1f3f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Aug 18 06:55:29 2001 +0000
+
+ Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit().
+
+ Sat Aug 18 12:42:26 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: load also the methods when loading a class.
+
+ Sat Aug 18 12:43:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: added support code to create exceptions.
+ Changed interncal calling convnetion over to MonoInvocation, to support
+    Changed internal calling convention over to MonoInvocation, to support
+    exceptions, walking the stack back and forth and passing the 'this'
+    pointer separately (removes the kludges previously required to pass this on the
+ arguments. Init local vars to zero.
+ Simplify stackval_from_data() and stackval_to_data() to only take a pointer
+ instead of pointer + offset.
+    Implement a few exception-related opcodes and the code to run finally, fault and
+ catch blocks as well as a stack trace if no handler is found.
+
+ Sat Aug 18 12:51:28 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: in the signature and method header store
+    only the space required for holding the local vars and incoming arguments.
+
+ svn path=/trunk/mono/; revision=493
+
+commit 75cdbf5cd16480631ac8579c2c2f230761e4802b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 17:21:29 2001 +0000
+
+ Fixed x86_mov_reg_imm().
+
+ svn path=/trunk/mono/; revision=441
+
+commit 5263eb4d219b8054b29a4d250cec40a7c8170a84
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Aug 8 16:48:32 2001 +0000
+
+ Update copyright
+
+ svn path=/trunk/mono/; revision=440
+
+commit c9397770c008d427da0b7ad058782fc8564c10d3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 13:32:23 2001 +0000
+
+ Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h, x86/test.c: added x86 code emitter with
+ test.
+
+ svn path=/trunk/mono/; revision=435
diff --git a/src/arch/LICENSE b/src/arch/LICENSE
new file mode 100644
index 0000000..cb4a84d
--- /dev/null
+++ b/src/arch/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2001, 2002, 2003 Ximian, Inc and the individuals listed
+on the ChangeLog entries.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/arch/Makefile.am b/src/arch/Makefile.am
new file mode 100644
index 0000000..8741687
--- /dev/null
+++ b/src/arch/Makefile.am
@@ -0,0 +1,11 @@
+DIST_SUBDIRS = x86 ppc sparc arm arm64 s390x x64 ia64 mips
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+if ARM
+# arm needs to build some stuff even in JIT mode
+SUBDIRS = $(arch_target)
+endif
+
+EXTRA_DIST = ChangeLog
+
diff --git a/src/arch/README b/src/arch/README
new file mode 100644
index 0000000..cfed57d
--- /dev/null
+++ b/src/arch/README
@@ -0,0 +1,7 @@
+mono_arch
+=========
+
+Part of the Mono project: https://github.com/mono
+
+These are C macros that are useful when generating native code on various platforms.
+This code is MIT X11 licensed.
diff --git a/src/arch/arm/.gitattributes b/src/arch/arm/.gitattributes
new file mode 100644
index 0000000..4819db1
--- /dev/null
+++ b/src/arch/arm/.gitattributes
@@ -0,0 +1 @@
+/arm-wmmx.h -crlf
diff --git a/src/arch/arm/.gitignore b/src/arch/arm/.gitignore
new file mode 100644
index 0000000..978145d
--- /dev/null
+++ b/src/arch/arm/.gitignore
@@ -0,0 +1,15 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.o
+/*.la
+/*.lo
+/*.lib
+/*.obj
+/*.exe
+/*.dll
+/arm_dpimacros.h
+/arm_fpamacros.h
+/arm_vfpmacros.h
+/fixeol.sh
diff --git a/src/arch/arm/Makefile.am b/src/arch/arm/Makefile.am
new file mode 100644
index 0000000..593574c
--- /dev/null
+++ b/src/arch/arm/Makefile.am
@@ -0,0 +1,27 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-arm.la
+
+BUILT_SOURCES = arm_dpimacros.h arm_vfpmacros.h
+
+
+libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \
+ arm-codegen.c \
+ arm-codegen.h \
+ arm-dis.c \
+ arm-dis.h
+
+arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th
+ (cd $(srcdir); bash ./dpiops.sh) > $@t
+ mv $@t $@
+
+arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th
+ (cd $(srcdir); bash ./vfpops.sh) > $@t
+ mv $@t $@
+
+CLEANFILES = $(BUILT_SOURCES)
+
+EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \
+ vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh
+
diff --git a/src/arch/arm/arm-codegen.c b/src/arch/arm/arm-codegen.c
new file mode 100644
index 0000000..9914ace
--- /dev/null
+++ b/src/arch/arm/arm-codegen.c
@@ -0,0 +1,193 @@
+/*
+ * arm-codegen.c
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#include "arm-codegen.h"
+
+
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+
+ /* save args */
+ ARM_PUSH(p, (1 << ARMREG_A1)
+ | (1 << ARMREG_A2)
+ | (1 << ARMREG_A3)
+ | (1 << ARMREG_A4));
+
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
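+			/* restore IP from stack */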
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
+
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) {
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF));
+
+ return p;
+}
+
+
+/* do not push A1-A4 */
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+	/* push_regs up to R10 will be saved */
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ /* restore IP from stack */
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
+
+/* Bit scan forward: returns the 1-based index of the least significant set bit, or 0 if val is 0. */
+int arm_bsf(armword_t val) {
+ int i;
+ armword_t mask;
+
+ if (val == 0) return 0;
+ for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1);
+
+ return i;
+}
+
+
+int arm_is_power_of_2(armword_t val) {
+ return ((val & (val-1)) == 0);
+}
+
+
+/*
+ * returns:
+ * 1 - unable to represent
+ * positive even number - MOV-representable
+ * negative even number - MVN-representable
+ */
+int calc_arm_mov_const_shift(armword_t val) {
+ armword_t mask;
+ int res = 1, shift;
+
+ for (shift=0; shift < 32; shift+=2) {
+ mask = ARM_SCALE(0xFF, shift);
+ if ((val & (~mask)) == 0) {
+ res = shift;
+ break;
+ }
+ if (((~val) & (~mask)) == 0) {
+ res = -shift - 2;
+ break;
+ }
+ }
+
+ return res;
+}
+
+
+int is_arm_const(armword_t val) {
+ int res;
+ res = arm_is_power_of_2(val);
+ if (!res) {
+ res = calc_arm_mov_const_shift(val);
+ res = !(res < 0 || res == 1);
+ }
+ return res;
+}
+
+
+int arm_const_steps(armword_t val) {
+ int shift, steps = 0;
+
+ while (val != 0) {
+ shift = (arm_bsf(val) - 1) & (~1);
+ val &= ~(0xFF << shift);
+ ++steps;
+ }
+ return steps;
+}
+
+
+/*
+ * ARM cannot load arbitrary 32-bit constants directly into registers;
+ * the widely used work-around is to store constants in a PC-addressable
+ * pool and load them with a PC-relative LDR. The easiest way to implement
+ * that is to embed the constant inside the function, with an unconditional
+ * branch around it. That method is not used here at the moment.
+ * Instead, this routine always emits a sequence of instructions that
+ * synthesizes the requested constant. In the worst case this takes
+ * 4 instructions: 1 MOV and 3 subsequent ORRs (e.g. 0x12345678, whose
+ * four byte fields each need their own step); a constant such as
+ * 0x0000FF00 is a single MOV.
+ */
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) {
+ int mov_op;
+ int step_op;
+ int snip;
+ int shift = calc_arm_mov_const_shift(imm32);
+
+ if ((shift & 0x80000001) != 1) {
+ if (shift >= 0) {
+			/* recover imm8 by rotating left, so patterns that wrap around bit 0 are handled too */
+			ARM_MOV_REG_IMM_COND(p, reg, ((imm32 << shift) | (imm32 >> ((32 - shift) & 31))) & 0xFF, shift, cond);
+		} else {
+			ARM_MVN_REG_IMM_COND(p, reg, (((~imm32) << (-shift - 2)) | ((~imm32) >> ((34 + shift) & 31))) & 0xFF, (-shift - 2), cond);
+ }
+ } else {
+ mov_op = ARMOP_MOV;
+ step_op = ARMOP_ORR;
+
+ if (arm_const_steps(imm32) > arm_const_steps(~imm32)) {
+ mov_op = ARMOP_MVN;
+ step_op = ARMOP_SUB;
+ imm32 = ~imm32;
+ }
+
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond));
+
+ while ((imm32 ^= snip) != 0) {
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond));
+ }
+ }
+
+ return p;
+}
+
+
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) {
+ return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL);
+}
+
+
+
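+#if 0
+/*
+ * Illustrative usage sketch only (not part of the original commit): the
+ * emitters take and return the current instruction pointer, so a caller
+ * threads `p` through each call. The function name below is hypothetical.
+ */
+static arminstr_t* example_load_const_and_return(arminstr_t* p)
+{
+	/* r0 := 0x12345678 (worst case: 1 MOV + 3 ORRs) */
+	p = arm_mov_reg_imm32(p, ARMREG_R0, 0x12345678);
+	/* return to caller: mov pc, lr */
+	ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_LR);
+	return p;
+}
+#endif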
diff --git a/src/arch/arm/arm-codegen.h b/src/arch/arm/arm-codegen.h
new file mode 100644
index 0000000..d4d7f7c
--- /dev/null
+++ b/src/arch/arm/arm-codegen.h
@@ -0,0 +1,1127 @@
+/*
+ * arm-codegen.h
+ * Copyright (c) 2002-2003 Sergey Chaban <serge@wildwestsoftware.com>
+ * Copyright 2005-2011 Novell Inc
+ * Copyright 2011 Xamarin Inc
+ */
+
+
+#ifndef ARM_H
+#define ARM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned int arminstr_t;
+typedef unsigned int armword_t;
+
+/* Helper functions */
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size);
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs);
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs);
+int arm_is_power_of_2(armword_t val);
+int calc_arm_mov_const_shift(armword_t val);
+int is_arm_const(armword_t val);
+int arm_bsf(armword_t val);
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond);
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32);
+
+
+
+#if defined(_MSC_VER) || defined(__CC_NORCROFT)
+ void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;}
+# define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i))
+#else
+# define ARM_EMIT(p, i) do { arminstr_t *__ainstrp = (void*)(p); *__ainstrp = (arminstr_t)(i); (p) = (void*)(__ainstrp+1);} while (0)
+#endif
+
+#if defined(_MSC_VER) && !defined(ARM_NOIASM)
+# define ARM_IASM(_expr) __emit (_expr)
+#else
+# define ARM_IASM(_expr)
+#endif
+
+/* even_scale = rot << 1 */
+#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - (even_scale))) )
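+/* e.g. ARM_SCALE(0xFF, 8) == 0xFF000000, i.e. 0xFF rotated right by 8 bits */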
+
+
+
+typedef enum {
+ ARMREG_R0 = 0,
+ ARMREG_R1,
+ ARMREG_R2,
+ ARMREG_R3,
+ ARMREG_R4,
+ ARMREG_R5,
+ ARMREG_R6,
+ ARMREG_R7,
+ ARMREG_R8,
+ ARMREG_R9,
+ ARMREG_R10,
+ ARMREG_R11,
+ ARMREG_R12,
+ ARMREG_R13,
+ ARMREG_R14,
+ ARMREG_R15,
+
+
+ /* aliases */
+ /* args */
+ ARMREG_A1 = ARMREG_R0,
+ ARMREG_A2 = ARMREG_R1,
+ ARMREG_A3 = ARMREG_R2,
+ ARMREG_A4 = ARMREG_R3,
+
+ /* local vars */
+ ARMREG_V1 = ARMREG_R4,
+ ARMREG_V2 = ARMREG_R5,
+ ARMREG_V3 = ARMREG_R6,
+ ARMREG_V4 = ARMREG_R7,
+ ARMREG_V5 = ARMREG_R8,
+ ARMREG_V6 = ARMREG_R9,
+ ARMREG_V7 = ARMREG_R10,
+
+ ARMREG_FP = ARMREG_R11,
+ ARMREG_IP = ARMREG_R12,
+ ARMREG_SP = ARMREG_R13,
+ ARMREG_LR = ARMREG_R14,
+ ARMREG_PC = ARMREG_R15,
+
+ /* co-processor */
+ ARMREG_CR0 = 0,
+ ARMREG_CR1,
+ ARMREG_CR2,
+ ARMREG_CR3,
+ ARMREG_CR4,
+ ARMREG_CR5,
+ ARMREG_CR6,
+ ARMREG_CR7,
+ ARMREG_CR8,
+ ARMREG_CR9,
+ ARMREG_CR10,
+ ARMREG_CR11,
+ ARMREG_CR12,
+ ARMREG_CR13,
+ ARMREG_CR14,
+ ARMREG_CR15,
+
+ /* XScale: acc0 on CP0 */
+ ARMREG_ACC0 = ARMREG_CR0,
+
+ ARMREG_MAX = ARMREG_R15
+} ARMReg;
+
+/* number of argument registers */
+#define ARM_NUM_ARG_REGS 4
+
+/* bitvector for all argument regs (A1-A4) */
+#define ARM_ALL_ARG_REGS \
+	((1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4))
+
+
+typedef enum {
+ ARMCOND_EQ = 0x0, /* Equal; Z = 1 */
+ ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */
+ ARMCOND_CS = 0x2, /* Carry set; C = 1 */
+ ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same; */
+ ARMCOND_CC = 0x3, /* Carry clear; C = 0 */
+ ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */
+ ARMCOND_MI = 0x4, /* Negative; N = 1 */
+ ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */
+ ARMCOND_VS = 0x6, /* Overflow; V = 1 */
+ ARMCOND_VC = 0x7, /* No overflow; V = 0 */
+ ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */
+ ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */
+ ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */
+ ARMCOND_LT = 0xB, /* Signed less than; N != V */
+ ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */
+	ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 || N != V */
+ ARMCOND_AL = 0xE, /* Always */
+ ARMCOND_NV = 0xF, /* Never */
+
+ ARMCOND_SHIFT = 28
+} ARMCond;
+
+#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT)
+
+#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT)
+
+
+
+typedef enum {
+ ARMSHIFT_LSL = 0,
+ ARMSHIFT_LSR = 1,
+ ARMSHIFT_ASR = 2,
+ ARMSHIFT_ROR = 3,
+
+ ARMSHIFT_ASL = ARMSHIFT_LSL
+	/* rrx is encoded as (ror, #0) */
+} ARMShiftType;
+
+
+typedef struct {
+ armword_t PSR_c : 8;
+ armword_t PSR_x : 8;
+ armword_t PSR_s : 8;
+ armword_t PSR_f : 8;
+} ARMPSR;
+
+typedef enum {
+ ARMOP_AND = 0x0,
+ ARMOP_EOR = 0x1,
+ ARMOP_SUB = 0x2,
+ ARMOP_RSB = 0x3,
+ ARMOP_ADD = 0x4,
+ ARMOP_ADC = 0x5,
+ ARMOP_SBC = 0x6,
+ ARMOP_RSC = 0x7,
+ ARMOP_TST = 0x8,
+ ARMOP_TEQ = 0x9,
+ ARMOP_CMP = 0xa,
+ ARMOP_CMN = 0xb,
+ ARMOP_ORR = 0xc,
+ ARMOP_MOV = 0xd,
+ ARMOP_BIC = 0xe,
+ ARMOP_MVN = 0xf,
+
+
+ /* not really opcodes */
+
+ ARMOP_STR = 0x0,
+ ARMOP_LDR = 0x1,
+
+ /* ARM2+ */
+ ARMOP_MUL = 0x0, /* Rd := Rm*Rs */
+ ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */
+
+ /* ARM3M+ */
+ ARMOP_UMULL = 0x4,
+ ARMOP_UMLAL = 0x5,
+ ARMOP_SMULL = 0x6,
+ ARMOP_SMLAL = 0x7,
+
+ /* for data transfers with register offset */
+ ARM_UP = 1,
+ ARM_DOWN = 0
+} ARMOpcode;
+
+typedef enum {
+ THUMBOP_AND = 0,
+ THUMBOP_EOR = 1,
+ THUMBOP_LSL = 2,
+ THUMBOP_LSR = 3,
+ THUMBOP_ASR = 4,
+ THUMBOP_ADC = 5,
+ THUMBOP_SBC = 6,
+ THUMBOP_ROR = 7,
+ THUMBOP_TST = 8,
+ THUMBOP_NEG = 9,
+ THUMBOP_CMP = 10,
+ THUMBOP_CMN = 11,
+ THUMBOP_ORR = 12,
+ THUMBOP_MUL = 13,
+ THUMBOP_BIC = 14,
+ THUMBOP_MVN = 15,
+ THUMBOP_MOV = 16,
+ THUMBOP_CMPI = 17,
+ THUMBOP_ADD = 18,
+ THUMBOP_SUB = 19,
+ THUMBOP_CMPH = 19,
+ THUMBOP_MOVH = 20
+} ThumbOpcode;
+
+
+/* Generic form - all ARM instructions are conditional. */
+typedef struct {
+ arminstr_t icode : 28;
+ arminstr_t cond : 4;
+} ARMInstrGeneric;
+
+
+
+/* Branch or Branch with Link instructions. */
+typedef struct {
+ arminstr_t offset : 24;
+ arminstr_t link : 1;
+ arminstr_t tag : 3; /* 1 0 1 */
+ arminstr_t cond : 4;
+} ARMInstrBR;
+
+#define ARM_BR_ID 5
+#define ARM_BR_MASK 7 << 25
+#define ARM_BR_TAG ARM_BR_ID << 25
+
+#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT))
+
+/* branch */
+#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond))
+#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs))
+/* branch with link */
+#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond))
+#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs))
+
+#define ARM_DEF_BX(reg,sub,cond) (0x12fff << 8 | (reg) | ((sub) << 4) | ((cond) << ARMCOND_SHIFT))
+
+#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 1, cond))
+#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg))
+
+#define ARM_BLX_REG_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 3, cond))
+#define ARM_BLX_REG(p, reg) ARM_BLX_REG_COND((p), ARMCOND_AL, (reg))
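+
+/* e.g. ARM_B(p, 0) branches to PC+8 (the 24-bit offset is in words, relative
+   to PC+8), and ARM_BX(p, ARMREG_LR) emits "bx lr", i.e. a plain return. */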
+
+/* Data Processing Instructions - there are 3 types. */
+
+typedef struct {
+ arminstr_t imm : 8;
+ arminstr_t rot : 4;
+} ARMDPI_op2_imm;
+
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */
+ arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */
+} ARMDPI_op2_reg_shift;
+
+
+/* op2 is reg shift by imm */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t shift : 5;
+ } imm;
+} ARMDPI_op2_reg_imm;
+
+/* op2 is reg shift by reg */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */
+ arminstr_t rs : 4;
+ } reg;
+} ARMDPI_op2_reg_reg;
+
+/* Data processing instrs */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+
+ ARMDPI_op2_reg_shift op2_reg;
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ ARMDPI_op2_reg_reg op2_reg_reg;
+
+ struct {
+ arminstr_t op2 : 12; /* raw operand 2 */
+ arminstr_t rd : 4; /* destination reg */
+ arminstr_t rn : 4; /* first operand reg */
+ arminstr_t s : 1; /* S-bit controls PSR update */
+ arminstr_t opcode : 4; /* arithmetic/logic operation */
+ arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */
+ arminstr_t tag : 2; /* 0 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrDPI;
+
+#define ARM_DPI_ID 0
+#define ARM_DPI_MASK 3 << 26
+#define ARM_DPI_TAG ARM_DPI_ID << 26
+
+#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (1 << 25) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \
+ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
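+
+/* e.g. ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_ADD, ARMREG_R0, ARMREG_R1, 4, 0, ARMCOND_AL)
+   emits "add r0, r1, #4"; rot is the even rotate-right amount applied to imm8. */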
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+
+
+#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \
+ (rm) | \
+ ((shift_type & 3) << 5) | \
+ (((imm_shift) & 0x1F) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+	ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+
+/* Rd := Rn op (Rm shift_type Rs) */
+#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \
+ (rm) | \
+ (1 << 4) | \
+ ((shift_type & 3) << 5) | \
+ ((rs) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+
+
+/* Multiple register transfer. */
+typedef struct {
+ arminstr_t reg_list : 16; /* bitfield */
+ arminstr_t rn : 4; /* base reg */
+ arminstr_t ls : 1; /* load(1)/store(0) */
+ arminstr_t wb : 1; /* write-back "!" */
+ arminstr_t s : 1; /* restore PSR, force user bit */
+ arminstr_t u : 1; /* up/down */
+ arminstr_t p : 1; /* pre(1)/post(0) index */
+ arminstr_t tag : 3; /* 1 0 0 */
+ arminstr_t cond : 4;
+} ARMInstrMRT;
+
+#define ARM_MRT_ID 4
+#define ARM_MRT_MASK 7 << 25
+#define ARM_MRT_TAG ARM_MRT_ID << 25
+
+#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \
+ (regs) | \
+ (rn << 16) | \
+ (l << 20) | \
+ (w << 21) | \
+ (s << 22) | \
+ (u << 23) | \
+ (p << 24) | \
+ (ARM_MRT_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_LDM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
+
+/* stmdb sp!, {regs} */
+#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+
+/* ldmia sp!, {regs} */
+#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+
+/* ldmia sp, {regs} ; (no write-back) */
+#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP_NWB(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1))
+#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2))
+#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+
+#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2))
+#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1))
+
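+/*
+ * Example (editor's sketch, not part of the original header): a typical
+ * prologue/epilogue pair emitted with the macros above.  "code" is
+ * assumed to be the arminstr_t* cursor that ARM_EMIT advances; the
+ * ARMREG_* names are the register constants defined earlier in this
+ * header.  Compiled out deliberately.
+ */
+#if 0
+ARM_PUSH3 (code, ARMREG_R4, ARMREG_R5, ARMREG_LR);	/* stmdb sp!, {r4, r5, lr} */
+/* ... function body ... */
+ARM_POP3 (code, ARMREG_R4, ARMREG_R5, ARMREG_PC);	/* ldmia sp!, {r4, r5, pc} ; pops straight into pc to return */
+#endif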
+
+/* Multiply instructions */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 4; /* 9 */
+ arminstr_t rs : 4;
+ arminstr_t rn : 4;
+ arminstr_t rd : 4;
+ arminstr_t s : 1;
+ arminstr_t opcode : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrMul;
+
+#define ARM_MUL_ID 0
+#define ARM_MUL_ID2 9
+#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4))
+#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4))
+
+#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \
+ (rm) | \
+ ((rs) << 8) | \
+ ((rn) << 12) | \
+ ((rd) << 16) | \
+ ((s & 1) << 20) | \
+ ((op & 7) << 21) | \
+ ARM_MUL_TAG | \
+ ARM_DEF_COND(cond)
+
+/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
+#define ARM_MUL_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_MUL(p, rd, rm, rs) \
+ ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MULS_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_MULS(p, rd, rm, rs) \
+ ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
+#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+
+/* inline */
+#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_IASM_MUL(rd, rm, rs) \
+ ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL)
+#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_IASM_MULS(rd, rm, rs) \
+ ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL)
+
+
+/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */
+#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_MLA(p, rd, rm, rs, rn) \
+ ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_MLAS(p, rd, rm, rs, rn) \
+ ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+
+/* inline */
+#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_IASM_MLA(rd, rm, rs, rn) \
+ ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_IASM_MLAS(rd, rm, rs, rn) \
+ ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL)
+
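+/*
+ * Example (editor's sketch): plain multiply and multiply-accumulate.
+ * On ARMv5 and earlier, Rd and Rm must be different registers for
+ * MUL/MLA; the macros do not check this.
+ */
+#if 0
+ARM_MUL (code, ARMREG_R0, ARMREG_R1, ARMREG_R2);		/* r0 = r1 * r2 */
+ARM_MLA (code, ARMREG_R0, ARMREG_R1, ARMREG_R2, ARMREG_R3);	/* r0 = r1 * r2 + r3 */
+#endif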
+
+
+/* Word/byte transfer */
+typedef union {
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ struct {
+ arminstr_t op2_imm : 12;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t b : 1;
+ arminstr_t u : 1; /* down(0) / up(1) */
+ arminstr_t p : 1; /* post-index(0) / pre-index(1) */
+ arminstr_t type : 1; /* imm(0) / register(1) */
+ arminstr_t tag : 2; /* 0 1 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrWXfer;
+
+#define ARM_WXFER_ID 1
+#define ARM_WXFER_MASK (3 << 26)
+#define ARM_WXFER_TAG (ARM_WXFER_ID << 26)
+
+
+#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \
+ ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_WXFER_MAX_OFFS 0xFFF
+
+/* this macro clamps imm12 to the +/-ARM_WXFER_MAX_OFFS range */
+#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \
+ do { \
+ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \
+ ? -ARM_WXFER_MAX_OFFS \
+ : (int)(imm12) > ARM_WXFER_MAX_OFFS \
+ ? ARM_WXFER_MAX_OFFS \
+ : (int)(imm12); \
+ ARM_EMIT((ptr), \
+ ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \
+ } while (0)
+
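+/*
+ * Worked example (editor's note): ARM_DEF_WXFER_IMM folds the sign of
+ * imm12 into the U (up/down) bit, and ARM_EMIT_WXFER_IMM clamps |imm12|
+ * to 0xFFF rather than rejecting it, so larger displacements must be
+ * split by the caller.
+ */
+#if 0
+ARM_EMIT_WXFER_IMM (code, -8, ARMREG_R0, ARMREG_SP, ARMOP_LDR, 0, 0, 1, ARMCOND_AL);	/* ldr r0, [sp, #-8] */
+#endif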
+
+/* LDRx */
+/* immediate offset, post-index */
+#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond))
+
+#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond))
+
+#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+
+#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+
+#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* STRx */
+/* immediate offset, post-index */
+#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+
+#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond))
+
+#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */
+
+#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+
+#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* write-back */
+#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond)
+#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ (1 << 25) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond)
+#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_DOWN, p, cond)
+
+
+#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDR_REG_REG(p, rd, rn, rm) \
+ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDRB_REG_REG(p, rd, rn, rm) \
+ ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond))
+#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* byte store */
+#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STRB_REG_REG(p, rd, rn, rm) \
+ ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+
+/* ARMv4+ */
+/* Half-word or byte (signed) transfer. */
+typedef struct {
+ arminstr_t rm : 4; /* imm_lo */
+ arminstr_t tag3 : 1; /* 1 */
+ arminstr_t h : 1; /* half-word or byte */
+ arminstr_t s : 1; /* sign-extend or zero-extend */
+ arminstr_t tag2 : 1; /* 1 */
+ arminstr_t imm_hi : 4;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t type : 1; /* imm(1) / reg(0) */
+ arminstr_t u : 1; /* +- */
+ arminstr_t p : 1; /* pre/post-index */
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrHXfer;
+
+#define ARM_HXFER_ID 0
+#define ARM_HXFER_ID2 1
+#define ARM_HXFER_ID3 1
+#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4))
+#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4))
+
+#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \
+ ((imm) < 0?(-(imm)) & 0xF:(imm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((imm) < 0?((-(imm)) << 4) & 0xF00:((imm) << 4) & 0xF00) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (1 << 22) | \
+ (((int)(imm) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_IMM(p, rd, rn, imm) \
+ ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_IMM(p, rd, rn, imm) \
+ ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_IMM(p, rd, rn, imm) \
+ ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_IMM(p, rd, rn, imm) \
+ ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
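+/*
+ * Example (editor's sketch): signed halfword load and halfword store.
+ * The immediate is split into imm_hi:4/imm_lo:4, so only offsets within
+ * +/-255 can be encoded here.
+ */
+#if 0
+ARM_LDRSH_IMM (code, ARMREG_R0, ARMREG_R1, 6);	/* ldrsh r0, [r1, #6] */
+ARM_STRH_IMM (code, ARMREG_R0, ARMREG_R1, 6);	/* strh r0, [r1, #6] */
+#endif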
+
+#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \
+ ((rm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (0 << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond)
+#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond)
+
+#define ARM_LDRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_REG_REG(p, rd, rm, rn) ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_REG_REG(p, rd, rm, rn) \
+ ARM_STRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+
+
+/* Swap */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag3 : 8; /* 0x9 */
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t tag2 : 2;
+ arminstr_t b : 1;
+ arminstr_t tag : 5; /* 0x2 */
+ arminstr_t cond : 4;
+} ARMInstrSwap;
+
+#define ARM_SWP_ID 2
+#define ARM_SWP_ID2 9
+#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4))
+#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4))
+
+
+
+/* Software interrupt */
+typedef struct {
+ arminstr_t num : 24;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrSWI;
+
+#define ARM_SWI_ID 0xF
+#define ARM_SWI_MASK (0xF << 24)
+#define ARM_SWI_TAG (ARM_SWI_ID << 24)
+
+
+
+/* Co-processor Data Processing */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1; /* 0 */
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4; /* CP number */
+ arminstr_t crd : 4;
+ arminstr_t crn : 4;
+ arminstr_t op : 4;
+ arminstr_t tag : 4; /* 0xE */
+ arminstr_t cond : 4;
+} ARMInstrCDP;
+
+#define ARM_CDP_ID 0xE
+#define ARM_CDP_ID2 0
+#define ARM_CDP_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4))
+
+
+/* Co-processor Data Transfer (ldc/stc) */
+typedef struct {
+ arminstr_t offs : 8;
+ arminstr_t cpn : 4;
+ arminstr_t crd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t n : 1;
+ arminstr_t u : 1;
+ arminstr_t p : 1;
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrCDT;
+
+#define ARM_CDT_ID 6
+#define ARM_CDT_MASK (7 << 25)
+#define ARM_CDT_TAG (ARM_CDT_ID << 25)
+
+
+/* Co-processor Register Transfer (mcr/mrc) */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1;
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4;
+ arminstr_t rd : 4;
+ arminstr_t crn : 4;
+ arminstr_t ls : 1;
+ arminstr_t op1 : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrCRT;
+
+#define ARM_CRT_ID 0xE
+#define ARM_CRT_ID2 0x1
+#define ARM_CRT_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4))
+
+/* Move register to PSR. */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+ struct {
+ arminstr_t rm : 4;
+ arminstr_t pad : 8; /* 0 */
+ arminstr_t tag4 : 4; /* 0xF */
+ arminstr_t fld : 4;
+ arminstr_t tag3 : 2; /* 0x2 */
+ arminstr_t sel : 1;
+ arminstr_t tag2 : 2; /* 0x2 */
+ arminstr_t type : 1;
+ arminstr_t tag : 2; /* 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrMSR;
+
+#define ARM_MSR_ID 0
+#define ARM_MSR_ID2 2
+#define ARM_MSR_ID3 2
+#define ARM_MSR_ID4 0xF
+#define ARM_MSR_MASK ((3 << 26) | \
+ (3 << 23) | \
+ (3 << 20) | \
+ (0xF << 12))
+#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \
+ (ARM_MSR_ID2 << 23) | \
+ (ARM_MSR_ID3 << 20) | \
+ (ARM_MSR_ID4 << 12))
+
+
+/* Move PSR to register. */
+typedef struct {
+ arminstr_t tag3 : 12;
+ arminstr_t rd : 4;
+ arminstr_t tag2 : 6;
+ arminstr_t sel : 1; /* CPSR(0) / SPSR(1) */
+ arminstr_t tag : 5;
+ arminstr_t cond : 4;
+} ARMInstrMRS;
+
+#define ARM_MRS_ID 2
+#define ARM_MRS_ID2 0xF
+#define ARM_MRS_ID3 0
+#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF)
+#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3)
+
+
+
+#include "mono/arch/arm/arm_dpimacros.h"
+
+#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0)
+
+
+#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHL_IMM(p, rd, rm, imm) \
+ ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHLS_IMM(p, rd, rm, imm) \
+ ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHR_IMM(p, rd, rm, imm) \
+ ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHRS_IMM(p, rd, rm, imm) \
+ ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SAR_IMM(p, rd, rm, imm) \
+ ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SARS_IMM(p, rd, rm, imm) \
+ ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_ROR_IMM(p, rd, rm, imm) \
+ ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_RORS_IMM(p, rd, rm, imm) \
+ ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHL_REG(p, rd, rm, rs) \
+ ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHLS_REG(p, rd, rm, rs) \
+ ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs)
+
+#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHR_REG(p, rd, rm, rs) \
+ ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHRS_REG(p, rd, rm, rs) \
+ ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs)
+
+#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SAR_REG(p, rd, rm, rs) \
+ ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SARS_REG(p, rd, rm, rs) \
+ ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs)
+
+#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_ROR_REG(p, rd, rm, rs) \
+ ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_RORS_REG(p, rd, rm, rs) \
+ ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
+
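+/*
+ * Example (editor's sketch): the shift aliases are just MOV with a
+ * shifted operand, handy for scaling an index.
+ */
+#if 0
+ARM_SHL_IMM (code, ARMREG_R0, ARMREG_R1, 2);	/* mov r0, r1, lsl #2 ; r0 = r1 * 4 */
+#endif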
+#ifdef __native_client_codegen__
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0)
+#else
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
+#endif
+#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
+
+#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
+#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1)
+
+#define ARM_MLS(p, rd, rn, rm, ra) ARM_EMIT((p), (ARMCOND_AL << 28) | (0x6 << 20) | ((rd) << 16) | ((ra) << 12) | ((rm) << 8) | (0x9 << 4) | ((rn) << 0))
+
+/* ARM V5 */
+
+/* Count leading zeros, CLZ{cond} Rd, Rm */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 8;
+ arminstr_t rd : 4;
+ arminstr_t tag : 12;
+ arminstr_t cond : 4;
+} ARMInstrCLZ;
+
+#define ARM_CLZ_ID 0x16F
+#define ARM_CLZ_ID2 0xF1
+#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF << 4))
+#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4))
+
+
+
+
+typedef union {
+ ARMInstrBR br;
+ ARMInstrDPI dpi;
+ ARMInstrMRT mrt;
+ ARMInstrMul mul;
+ ARMInstrWXfer wxfer;
+ ARMInstrHXfer hxfer;
+ ARMInstrSwap swp;
+ ARMInstrCDP cdp;
+ ARMInstrCDT cdt;
+ ARMInstrCRT crt;
+ ARMInstrSWI swi;
+ ARMInstrMSR msr;
+ ARMInstrMRS mrs;
+ ARMInstrCLZ clz;
+
+ ARMInstrGeneric generic;
+ arminstr_t raw;
+} ARMInstr;
+
+/* ARMv6t2 */
+
+#define ARM_MOVW_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (0 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVW_REG_IMM(p, rd, imm16) ARM_MOVW_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+#define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
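+/*
+ * Example (editor's sketch): materializing a full 32-bit constant with a
+ * movw/movt pair, low half first.
+ */
+#if 0
+ARM_MOVW_REG_IMM (code, ARMREG_R0, 0xBEEF);	/* movw r0, #0xbeef */
+ARM_MOVT_REG_IMM (code, ARMREG_R0, 0xDEAD);	/* movt r0, #0xdead ; r0 == 0xDEADBEEF */
+#endif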
+/* MCR */
+#define ARM_DEF_MCR_COND(coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_DEF_COND ((cond)) | ((0xe << 24) | (((opc1) & 0x7) << 21) | (0 << 20) | (((crn) & 0xf) << 16) | (((rt) & 0xf) << 12) | (((coproc) & 0xf) << 8) | (((opc2) & 0x7) << 5) | (1 << 4) | (((crm) & 0xf) << 0))
+
+#define ARM_MCR_COND(p, coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_EMIT(p, ARM_DEF_MCR_COND ((coproc), (opc1), (rt), (crn), (crm), (opc2), (cond)))
+
+#define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \
+ ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL)
+
+/* ARMv7VE */
+#define ARM_SDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x1 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_SDIV(p, rd, rn, rm) ARM_SDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+#define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+/* ARMv7 */
+
+typedef enum {
+ ARM_DMB_SY = 0xf,
+} ArmDmbFlags;
+
+#define ARM_DMB(p, option) ARM_EMIT ((p), ((0xf << 28) | (0x57 << 20) | (0xf << 16) | (0xf << 12) | (0x0 << 8) | (0x5 << 4) | ((option) << 0)))
+
+#define ARM_LDREX_REG(p, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x1 << 20) | ((rn) << 16) | ((rt) << 12)) | (0xf << 8) | (0x9 << 4) | 0xf << 0)
+
+#define ARM_STREX_REG(p, rd, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x0 << 20) | ((rn) << 16) | ((rd) << 12)) | (0xf << 8) | (0x9 << 4) | ((rt) << 0))
+
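+/*
+ * Example (editor's sketch): an atomic increment built from the
+ * exclusive accessors above.  The backward branch on a failed strex is
+ * left schematic; a real emitter would patch it itself.
+ */
+#if 0
+/* retry: */
+ARM_LDREX_REG (code, ARMREG_R1, ARMREG_R0);		/* ldrex r1, [r0] */
+ARM_ADD_REG_IMM8 (code, ARMREG_R1, ARMREG_R1, 1);
+ARM_STREX_REG (code, ARMREG_R2, ARMREG_R1, ARMREG_R0);	/* strex r2, r1, [r0] */
+/* cmp r2, #0 ; bne retry */
+ARM_DMB (code, ARM_DMB_SY);				/* make the update visible */
+#endif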
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_H */
+
diff --git a/src/arch/arm/arm-dis.c b/src/arch/arm/arm-dis.c
new file mode 100644
index 0000000..5074f26
--- /dev/null
+++ b/src/arch/arm/arm-dis.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+
+#include <stdarg.h>
+
+#include "arm-dis.h"
+#include "arm-codegen.h"
+
+
+static ARMDis* gdisasm = NULL;
+
+static int use_reg_alias = 1;
+
+const static char* cond[] = {
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "", "nv"
+};
+
+const static char* ops[] = {
+ "and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc",
+ "tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn"
+};
+
+const static char* shift_types[] = {"lsl", "lsr", "asr", "ror"};
+
+const static char* mul_ops[] = {
+ "mul", "mla", "?", "?", "umull", "umlal", "smull", "smlal"
+};
+
+const static char* reg_alias[] = {
+ "a1", "a2", "a3", "a4",
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+
+const static char* msr_fld[] = {"f", "c", "x", "?", "s"};
+
+
+/* private function prototypes (to keep the compiler happy) */
+void chk_out(ARMDis* dis);
+void dump_reg(ARMDis* dis, int reg);
+void dump_creg(ARMDis* dis, int creg);
+void dump_reglist(ARMDis* dis, int reg_list);
+void init_gdisasm(void);
+
+void dump_br(ARMDis* dis, ARMInstr i);
+void dump_cdp(ARMDis* dis, ARMInstr i);
+void dump_cdt(ARMDis* dis, ARMInstr i);
+void dump_crt(ARMDis* dis, ARMInstr i);
+void dump_dpi(ARMDis* dis, ARMInstr i);
+void dump_hxfer(ARMDis* dis, ARMInstr i);
+void dump_mrs(ARMDis* dis, ARMInstr i);
+void dump_mrt(ARMDis* dis, ARMInstr i);
+void dump_msr(ARMDis* dis, ARMInstr i);
+void dump_mul(ARMDis* dis, ARMInstr i);
+void dump_swi(ARMDis* dis, ARMInstr i);
+void dump_swp(ARMDis* dis, ARMInstr i);
+void dump_wxfer(ARMDis* dis, ARMInstr i);
+void dump_clz(ARMDis* dis, ARMInstr i);
+
+
+/*
+void out(ARMDis* dis, const char* format, ...) {
+ va_list arglist;
+ va_start(arglist, format);
+ vfprintf(dis->dis_out, format, arglist);
+ va_end(arglist);
+}
+*/
+
+
+void chk_out(ARMDis* dis) {
+ if (dis != NULL && dis->dis_out == NULL) dis->dis_out = stdout;
+}
+
+
+void armdis_set_output(ARMDis* dis, FILE* f) {
+ if (dis != NULL) {
+ dis->dis_out = f;
+ chk_out(dis);
+ }
+}
+
+FILE* armdis_get_output(ARMDis* dis) {
+ return (dis != NULL ? dis->dis_out : NULL);
+}
+
+
+
+
+void dump_reg(ARMDis* dis, int reg) {
+ reg &= 0xF;
+ if (!use_reg_alias || (reg > 3 && reg < 11)) {
+ fprintf(dis->dis_out, "r%d", reg);
+ } else {
+ fprintf(dis->dis_out, "%s", reg_alias[reg]);
+ }
+}
+
+void dump_creg(ARMDis* dis, int creg) {
+ if (dis != NULL) {
+ creg &= 0xF;
+ fprintf(dis->dis_out, "c%d", creg);
+ }
+}
+
+void dump_reglist(ARMDis* dis, int reg_list) {
+ int i = 0, j, n = 0;
+ int m1 = 1, m2, rn;
+ while (i < 16) {
+ if ((reg_list & m1) != 0) {
+ if (n != 0) fprintf(dis->dis_out, ", ");
+ n++;
+ dump_reg(dis, i);
+ for (j = i+1, rn = 0, m2 = m1<<1; j < 16; ++j, m2<<=1) {
+ if ((reg_list & m2) != 0) ++rn;
+ else break;
+ }
+ i+=rn;
+ if (rn > 1) {
+ fprintf(dis->dis_out, "-");
+ dump_reg(dis, i);
+ } else if (rn == 1) {
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i);
+ }
+ m1<<=(rn+1);
+ i++;
+ } else {
+ ++i;
+ m1<<=1;
+ }
+ }
+}
+
+
+void dump_br(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %#x",
+ (i.br.link == 1) ? "l" : "",
+ cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6));
+}
+
+
+void dump_dpi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]);
+
+ if ((i.dpi.all.opcode < ARMOP_TST || i.dpi.all.opcode > ARMOP_CMN) && (i.dpi.all.s != 0)) {
+ fprintf(dis->dis_out, "s");
+ }
+
+ fprintf(dis->dis_out, "\t");
+
+ if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) {
+ /* for comparison operations Rd is ignored */
+ dump_reg(dis, i.dpi.all.rd);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) {
+ /* for MOV/MVN Rn is ignored */
+ dump_reg(dis, i.dpi.all.rn);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if (i.dpi.all.type == 1) {
+ /* immediate */
+ if (i.dpi.op2_imm.rot != 0) {
+ fprintf(dis->dis_out, "#%d, %d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.rot << 1,
+ ARM_SCALE(i.dpi.op2_imm.imm, (i.dpi.op2_imm.rot << 1)) );
+ } else {
+ fprintf(dis->dis_out, "#%d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.imm);
+ }
+ } else {
+ /* reg-reg */
+ if (i.dpi.op2_reg.tag == 0) {
+ /* op2 is reg shift by imm */
+ dump_reg(dis, i.dpi.op2_reg_imm.r2.rm);
+ if (i.dpi.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.dpi.op2_reg_imm.r2.type], i.dpi.op2_reg_imm.imm.shift);
+ }
+ } else {
+ /* op2 is reg shift by reg */
+ dump_reg(dis, i.dpi.op2_reg_reg.r2.rm);
+ fprintf(dis->dis_out, " %s ", shift_types[i.dpi.op2_reg_reg.r2.type]);
+ dump_reg(dis, i.dpi.op2_reg_reg.reg.rs);
+ }
+
+ }
+}
+
+void dump_wxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.wxfer.all.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.wxfer.all.b == 0) ? "" : "b",
+ (i.wxfer.all.ls != 0 && i.wxfer.all.wb != 0) ? "t" : "");
+ dump_reg(dis, i.wxfer.all.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.wxfer.all.rn);
+ fprintf(dis->dis_out, "%s, ", (i.wxfer.all.p == 0) ? "]" : "");
+
+ if (i.wxfer.all.type == 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.wxfer.all.u == 0) ? "-" : "", i.wxfer.all.op2_imm);
+ } else {
+ dump_reg(dis, i.wxfer.op2_reg_imm.r2.rm);
+ if (i.wxfer.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.wxfer.op2_reg_imm.r2.type], i.wxfer.op2_reg_imm.imm.shift);
+ }
+ }
+
+ if (i.wxfer.all.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.wxfer.all.wb != 0) ? "!" : "");
+ }
+}
+
+void dump_hxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.hxfer.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.hxfer.s != 0) ? "s" : "",
+ (i.hxfer.h != 0) ? "h" : "b");
+ dump_reg(dis, i.hxfer.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.hxfer.rn);
+ fprintf(dis->dis_out, "%s, ", (i.hxfer.p == 0) ? "]" : "");
+
+ if (i.hxfer.type != 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.hxfer.u == 0) ? "-" : "", (i.hxfer.imm_hi << 4) | i.hxfer.rm);
+ } else {
+ dump_reg(dis, i.hxfer.rm);
+ }
+
+ if (i.hxfer.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.hxfer.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_mrt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t", (i.mrt.ls == 0) ? "stm" : "ldm", cond[i.mrt.cond],
+ (i.mrt.u == 0) ? "d" : "i", (i.mrt.p == 0) ? "a" : "b");
+ dump_reg(dis, i.mrt.rn);
+ fprintf(dis->dis_out, "%s, {", (i.mrt.wb != 0) ? "!" : "");
+ dump_reglist(dis, i.mrt.reg_list);
+ fprintf(dis->dis_out, "}");
+}
+
+
+void dump_swp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swp%s%s ", cond[i.swp.cond], (i.swp.b != 0) ? "b" : "");
+ dump_reg(dis, i.swp.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.swp.rm);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.swp.rn);
+ fprintf(dis->dis_out, "]");
+}
+
+
+void dump_mul(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\t", mul_ops[i.mul.opcode], cond[i.mul.cond], (i.mul.s != 0) ? "s" : "");
+ switch (i.mul.opcode) {
+ case ARMOP_MUL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ case ARMOP_MLA:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ break;
+ case ARMOP_UMULL:
+ case ARMOP_UMLAL:
+ case ARMOP_SMULL:
+ case ARMOP_SMLAL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ default:
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", i.raw);
+ break;
+ }
+}
+
+
+void dump_cdp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "cdp%s\tp%d, %d, ", cond[i.generic.cond], i.cdp.cpn, i.cdp.op);
+ dump_creg(dis, i.cdp.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crm);
+
+ if (i.cdp.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.cdp.op2);
+ }
+}
+
+
+void dump_cdt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\tp%d, ", (i.cdt.ls == 0) ? "stc" : "ldc",
+ cond[i.generic.cond], (i.cdt.n != 0) ? "l" : "", i.cdt.cpn);
+ dump_creg(dis, i.cdt.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.cdt.rn);
+
+ if (i.cdt.p == 0) {
+ fprintf(dis->dis_out, "]");
+ }
+
+ if (i.cdt.offs != 0) {
+ fprintf(dis->dis_out, ", #%d", i.cdt.offs);
+ }
+
+ if (i.cdt.p != 0) {
+ fprintf(dis->dis_out, "]%s", (i.cdt.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_crt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s\tp%d, %d, ", (i.crt.ls == 0) ? "mrc" : "mcr",
+ cond[i.generic.cond], i.crt.cpn, i.crt.op1);
+ dump_reg(dis, i.crt.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crm);
+
+ if (i.crt.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.crt.op2);
+ }
+}
+
+
+void dump_msr(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "msr%s\t%spsr_, ", cond[i.generic.cond],
+ (i.msr.all.sel == 0) ? "s" : "c");
+ if (i.msr.all.type == 0) {
+ /* reg */
+ fprintf(dis->dis_out, "%s, ", msr_fld[i.msr.all.fld]);
+ dump_reg(dis, i.msr.all.rm);
+ } else {
+ /* imm */
+ fprintf(dis->dis_out, "f, #%d", i.msr.op2_imm.imm << i.msr.op2_imm.rot);
+ }
+}
+
+
+void dump_mrs(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "mrs%s\t", cond[i.generic.cond]);
+ dump_reg(dis, i.mrs.rd);
+ fprintf(dis->dis_out, ", %spsr", (i.mrs.sel == 0) ? "s" : "c");
+}
+
+
+void dump_swi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swi%s\t%d", cond[i.generic.cond], i.swi.num);
+}
+
+
+void dump_clz(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "clz\t");
+ dump_reg(dis, i.clz.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.clz.rm);
+ fprintf(dis->dis_out, "\n");
+}
+
+
+
+void armdis_decode(ARMDis* dis, void* p, int size) {
+ int i;
+ arminstr_t* pi = (arminstr_t*)p;
+ ARMInstr instr;
+
+ if (dis == NULL) return;
+
+ chk_out(dis);
+
+ size/=sizeof(arminstr_t);
+
+ for (i=0; i<size; ++i) {
+ fprintf(dis->dis_out, "%p:\t%08x\t", pi, *pi);
+ dis->pi = pi;
+ instr.raw = *pi++;
+
+ if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) {
+ dump_br(dis, instr);
+ } else if ((instr.raw & ARM_SWP_MASK) == ARM_SWP_TAG) {
+ dump_swp(dis, instr);
+ } else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) {
+ dump_mul(dis, instr);
+ } else if ((instr.raw & ARM_CLZ_MASK) == ARM_CLZ_TAG) {
+ dump_clz(dis, instr);
+ } else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) {
+ dump_wxfer(dis, instr);
+ } else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) {
+ dump_hxfer(dis, instr);
+ } else if ((instr.raw & ARM_DPI_MASK) == ARM_DPI_TAG) {
+ dump_dpi(dis, instr);
+ } else if ((instr.raw & ARM_MRT_MASK) == ARM_MRT_TAG) {
+ dump_mrt(dis, instr);
+ } else if ((instr.raw & ARM_CDP_MASK) == ARM_CDP_TAG) {
+ dump_cdp(dis, instr);
+ } else if ((instr.raw & ARM_CDT_MASK) == ARM_CDT_TAG) {
+ dump_cdt(dis, instr);
+ } else if ((instr.raw & ARM_CRT_MASK) == ARM_CRT_TAG) {
+ dump_crt(dis, instr);
+ } else if ((instr.raw & ARM_MSR_MASK) == ARM_MSR_TAG) {
+ dump_msr(dis, instr);
+ } else if ((instr.raw & ARM_MRS_MASK) == ARM_MRS_TAG) {
+ dump_mrs(dis, instr);
+ } else if ((instr.raw & ARM_SWI_MASK) == ARM_SWI_TAG) {
+ dump_swi(dis, instr);
+ } else {
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", instr.raw);
+ }
+
+ fprintf(dis->dis_out, "\n");
+ }
+}
+
+
+void armdis_open(ARMDis* dis, const char* dump_name) {
+ if (dis != NULL && dump_name != NULL) {
+ armdis_set_output(dis, fopen(dump_name, "w"));
+ }
+}
+
+
+void armdis_close(ARMDis* dis) {
+ if (dis->dis_out != NULL && dis->dis_out != stdout && dis->dis_out != stderr) {
+ fclose(dis->dis_out);
+ dis->dis_out = NULL;
+ }
+}
+
+
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size) {
+ armdis_open(dis, dump_name);
+ armdis_decode(dis, p, size);
+ armdis_close(dis);
+}
+
+
+void armdis_init(ARMDis* dis) {
+ if (dis != NULL) {
+ /* set to stdout */
+ armdis_set_output(dis, NULL);
+ }
+}
+
+
+
+
+void init_gdisasm() {
+ if (gdisasm == NULL) {
+ gdisasm = (ARMDis*)malloc(sizeof(ARMDis));
+ armdis_init(gdisasm);
+ }
+}
+
+void _armdis_set_output(FILE* f) {
+ init_gdisasm();
+ armdis_set_output(gdisasm, f);
+}
+
+FILE* _armdis_get_output() {
+ init_gdisasm();
+ return armdis_get_output(gdisasm);
+}
+
+void _armdis_decode(void* p, int size) {
+ init_gdisasm();
+ armdis_decode(gdisasm, p, size);
+}
+
+void _armdis_open(const char* dump_name) {
+ init_gdisasm();
+ armdis_open(gdisasm, dump_name);
+}
+
+void _armdis_close() {
+ init_gdisasm();
+ armdis_close(gdisasm);
+}
+
+void _armdis_dump(const char* dump_name, void* p, int size) {
+ init_gdisasm();
+ armdis_dump(gdisasm, dump_name, p, size);
+}
+
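+/*
+ * Usage sketch (editor's note): emit a couple of instructions and feed
+ * the buffer to the global-disassembler wrappers.  The size argument is
+ * in bytes.
+ */
+#if 0
+arminstr_t buf [64], *code = buf;
+ARM_MOV_REG_REG (code, ARMREG_R0, ARMREG_R1);
+ARM_NOP (code);
+_armdis_decode (buf, (char*)code - (char*)buf);
+#endif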
diff --git a/src/arch/arm/arm-dis.h b/src/arch/arm/arm-dis.h
new file mode 100644
index 0000000..8019499
--- /dev/null
+++ b/src/arch/arm/arm-dis.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#ifndef ARM_DIS
+#define ARM_DIS
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _ARMDis {
+ FILE* dis_out;
+ void* pi;
+} ARMDis;
+
+
+void _armdis_set_output(FILE* f);
+FILE* _armdis_get_output(void);
+void _armdis_decode(void* p, int size);
+void _armdis_open(const char* dump_name);
+void _armdis_close(void);
+void _armdis_dump(const char* dump_name, void* p, int size);
+
+
+void armdis_init(ARMDis* dis);
+void armdis_set_output(ARMDis* dis, FILE* f);
+FILE* armdis_get_output(ARMDis* dis);
+void armdis_decode(ARMDis* dis, void* p, int size);
+void armdis_open(ARMDis* dis, const char* dump_name);
+void armdis_close(ARMDis* dis);
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_DIS */
diff --git a/src/arch/arm/arm-vfp-codegen.h b/src/arch/arm/arm-vfp-codegen.h
new file mode 100644
index 0000000..8b56b00
--- /dev/null
+++ b/src/arch/arm/arm-vfp-codegen.h
@@ -0,0 +1,247 @@
+//
+// Copyright 2011 Xamarin Inc
+//
+
+#ifndef __MONO_ARM_VFP_CODEGEN_H__
+#define __MONO_ARM_VFP_CODEGEN_H__
+
+#include "arm-codegen.h"
+
+enum {
+ /* VFP registers */
+ ARM_VFP_F0,
+ ARM_VFP_F1,
+ ARM_VFP_F2,
+ ARM_VFP_F3,
+ ARM_VFP_F4,
+ ARM_VFP_F5,
+ ARM_VFP_F6,
+ ARM_VFP_F7,
+ ARM_VFP_F8,
+ ARM_VFP_F9,
+ ARM_VFP_F10,
+ ARM_VFP_F11,
+ ARM_VFP_F12,
+ ARM_VFP_F13,
+ ARM_VFP_F14,
+ ARM_VFP_F15,
+ ARM_VFP_F16,
+ ARM_VFP_F17,
+ ARM_VFP_F18,
+ ARM_VFP_F19,
+ ARM_VFP_F20,
+ ARM_VFP_F21,
+ ARM_VFP_F22,
+ ARM_VFP_F23,
+ ARM_VFP_F24,
+ ARM_VFP_F25,
+ ARM_VFP_F26,
+ ARM_VFP_F27,
+ ARM_VFP_F28,
+ ARM_VFP_F29,
+ ARM_VFP_F30,
+ ARM_VFP_F31,
+
+ ARM_VFP_D0 = ARM_VFP_F0,
+ ARM_VFP_D1 = ARM_VFP_F2,
+ ARM_VFP_D2 = ARM_VFP_F4,
+ ARM_VFP_D3 = ARM_VFP_F6,
+ ARM_VFP_D4 = ARM_VFP_F8,
+ ARM_VFP_D5 = ARM_VFP_F10,
+ ARM_VFP_D6 = ARM_VFP_F12,
+ ARM_VFP_D7 = ARM_VFP_F14,
+ ARM_VFP_D8 = ARM_VFP_F16,
+ ARM_VFP_D9 = ARM_VFP_F18,
+ ARM_VFP_D10 = ARM_VFP_F20,
+ ARM_VFP_D11 = ARM_VFP_F22,
+ ARM_VFP_D12 = ARM_VFP_F24,
+ ARM_VFP_D13 = ARM_VFP_F26,
+ ARM_VFP_D14 = ARM_VFP_F28,
+ ARM_VFP_D15 = ARM_VFP_F30,
+
+ ARM_VFP_COPROC_SINGLE = 10,
+ ARM_VFP_COPROC_DOUBLE = 11,
+
+#define ARM_VFP_OP(p,q,r,s) (((p) << 23) | ((q) << 21) | ((r) << 20) | ((s) << 6))
+#define ARM_VFP_OP2(Fn,N) (ARM_VFP_OP (1,1,1,1) | ((Fn) << 16) | ((N) << 7))
+
+ ARM_VFP_MUL = ARM_VFP_OP (0,1,0,0),
+ ARM_VFP_NMUL = ARM_VFP_OP (0,1,0,1),
+ ARM_VFP_ADD = ARM_VFP_OP (0,1,1,0),
+ ARM_VFP_SUB = ARM_VFP_OP (0,1,1,1),
+ ARM_VFP_DIV = ARM_VFP_OP (1,0,0,0),
+
+ ARM_VFP_CPY = ARM_VFP_OP2 (0,0),
+ ARM_VFP_ABS = ARM_VFP_OP2 (0,1),
+ ARM_VFP_NEG = ARM_VFP_OP2 (1,0),
+ ARM_VFP_SQRT = ARM_VFP_OP2 (1,1),
+ ARM_VFP_CMP = ARM_VFP_OP2 (4,0),
+ ARM_VFP_CMPE = ARM_VFP_OP2 (4,1),
+ ARM_VFP_CMPZ = ARM_VFP_OP2 (5,0),
+ ARM_VFP_CMPEZ = ARM_VFP_OP2 (5,1),
+ ARM_VFP_CVT = ARM_VFP_OP2 (7,1),
+ ARM_VFP_UITO = ARM_VFP_OP2 (8,0),
+ ARM_VFP_SITO = ARM_VFP_OP2 (8,1),
+ ARM_VFP_TOUI = ARM_VFP_OP2 (12,0),
+ ARM_VFP_TOSI = ARM_VFP_OP2 (13,0),
+ ARM_VFP_TOUIZ = ARM_VFP_OP2 (12,1),
+ ARM_VFP_TOSIZ = ARM_VFP_OP2 (13,1),
+
+ ARM_VFP_SID = 0,
+ ARM_VFP_SCR = 1 << 1,
+ ARM_VFP_EXC = 8 << 1
+};
+
+#define ARM_DEF_VFP_DYADIC(cond,cp,op,Fd,Fn,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_MONADIC(cond,cp,op,Fd,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \
+ ((offset) >= 0? (offset)>>2: -(offset)>>2) | \
+ (6 << 25) | \
+ ((cp) << 8) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ ((basereg) << 16) | \
+ ((ls) << 20) | \
+ ((wback) << 21) | \
+ (((offset) >= 0) << 23) | \
+ ((post) << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) \
+ (14 << 24) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((op) << 21) | \
+ ((L) << 20) | \
+ ((Rd) << 12) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ ARM_DEF_COND(cond)
+
+/* FP load and stores */
+#define ARM_FLDS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDS(p,freg,base,offset) \
+ ARM_FLDS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDD(p,freg,base,offset) \
+ ARM_FLDD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTS(p,freg,base,offset) \
+ ARM_FSTS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTD(p,freg,base,offset) \
+ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_LDR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FLDMD(p,first_reg,nregs,base) \
+ ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#define ARM_FSTMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_STR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FSTMD(p,first_reg,nregs,base) \
+ ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#include <mono/arch/arm/arm_vfpmacros.h>
+
+/* coprocessor register transfer */
+#define ARM_FMSR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg)))
+#define ARM_FMRS(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,1,(freg),(reg)))
+
+#define ARM_FMDLR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,0,(freg),(reg)))
+#define ARM_FMRDL(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,1,(freg),(reg)))
+#define ARM_FMDHR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,0,(freg),(reg)))
+#define ARM_FMRDH(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,1,(freg),(reg)))
+
+#define ARM_FMXR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg)))
+#define ARM_FMRX(p,reg,fcreg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(fcreg),(reg)))
+
+#define ARM_FMSTAT(p) \
+ ARM_FMRX((p),ARMREG_R15,ARM_VFP_SCR)
+
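+/*
+ * Example (editor's sketch): compare two doubles and copy the VFP status
+ * flags into the CPSR so an ordinary conditional can follow.  ARM_FCMPD
+ * is assumed to be one of the macros generated into arm_vfpmacros.h;
+ * the exact generated name may differ.
+ */
+#if 0
+ARM_FLDD (code, ARM_VFP_D0, ARMREG_R0, 0);	/* d0 = *(double*)r0 */
+ARM_FLDD (code, ARM_VFP_D1, ARMREG_R1, 0);	/* d1 = *(double*)r1 */
+ARM_FCMPD (code, ARM_VFP_D0, ARM_VFP_D1);	/* hypothetical generated macro */
+ARM_FMSTAT (code);				/* FPSCR flags -> CPSR */
+#endif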
+#define ARM_DEF_MCRR(cond,cp,rn,rd,Fm,M) \
+ ((Fm) << 0) | \
+ (1 << 4) | \
+ ((M) << 5) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((2) << 21) | \
+ (12 << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMDRR(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_MCRR(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FMRRD(cond,cp,rn,rd,Dm,D) \
+ ((Dm) << 0) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((0xc5) << 20) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMRRD(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_FMRRD(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FUITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FUITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#endif /* __MONO_ARM_VFP_CODEGEN_H__ */
+
diff --git a/src/arch/arm/arm-wmmx.h b/src/arch/arm/arm-wmmx.h
new file mode 100644
index 0000000..427c4fc
--- /dev/null
+++ b/src/arch/arm/arm-wmmx.h
@@ -0,0 +1,177 @@
+/*
+ * ARM CodeGen
+ * XScale WirelessMMX extensions
+ * Copyright 2002 Wild West Software
+ */
+
+#ifndef __WMMX_H__
+#define __WMMX_H__ 1
+
+#if 0
+#include <arm-codegen.h>
+#endif
+
+#if defined(ARM_IASM)
+# define WM_ASM(_expr) ARM_IASM(_expr)
+#else
+# define WM_ASM(_expr) __emit (_expr)
+#endif
+
+#if defined(ARM_EMIT)
+# define WM_EMIT(p, i) ARM_EMIT(p, i)
+#else
+# define WM_EMIT(p, i)
+#endif
+
+enum {
+ WM_CC_EQ = 0x0,
+ WM_CC_NE = 0x1,
+ WM_CC_CS = 0x2,
+ WM_CC_HS = WM_CC_CS,
+ WM_CC_CC = 0x3,
+ WM_CC_LO = WM_CC_CC,
+ WM_CC_MI = 0x4,
+ WM_CC_PL = 0x5,
+ WM_CC_VS = 0x6,
+ WM_CC_VC = 0x7,
+ WM_CC_HI = 0x8,
+ WM_CC_LS = 0x9,
+ WM_CC_GE = 0xA,
+ WM_CC_LT = 0xB,
+ WM_CC_GT = 0xC,
+ WM_CC_LE = 0xD,
+ WM_CC_AL = 0xE,
+ WM_CC_NV = 0xF,
+ WM_CC_SHIFT = 28
+};
+
+#if defined(ARM_DEF_COND)
+# define WM_DEF_CC(_cc) ARM_DEF_COND(_cc)
+#else
+# define WM_DEF_CC(_cc) ((_cc & 0xF) << WM_CC_SHIFT)
+#endif
+
+
+enum {
+ WM_R0 = 0x0,
+ WM_R1 = 0x1,
+ WM_R2 = 0x2,
+ WM_R3 = 0x3,
+ WM_R4 = 0x4,
+ WM_R5 = 0x5,
+ WM_R6 = 0x6,
+ WM_R7 = 0x7,
+ WM_R8 = 0x8,
+ WM_R9 = 0x9,
+ WM_R10 = 0xA,
+ WM_R11 = 0xB,
+ WM_R12 = 0xC,
+ WM_R13 = 0xD,
+ WM_R14 = 0xE,
+ WM_R15 = 0xF,
+
+ WM_wR0 = 0x0,
+ WM_wR1 = 0x1,
+ WM_wR2 = 0x2,
+ WM_wR3 = 0x3,
+ WM_wR4 = 0x4,
+ WM_wR5 = 0x5,
+ WM_wR6 = 0x6,
+ WM_wR7 = 0x7,
+ WM_wR8 = 0x8,
+ WM_wR9 = 0x9,
+ WM_wR10 = 0xA,
+ WM_wR11 = 0xB,
+ WM_wR12 = 0xC,
+ WM_wR13 = 0xD,
+ WM_wR14 = 0xE,
+ WM_wR15 = 0xF
+};
+
+
+/*
+ * Qualifiers:
+ * B - 8-bit (Byte) SIMD
+ * H - 16-bit (HalfWord) SIMD
+ * D - 64-bit (DoubleWord) SIMD
+ */
+enum {
+ WM_B = 0,
+ WM_H = 1,
+ WM_D = 2
+};
+
+/*
+ * B.2.3 Transfers From Coprocessor Register (MRC)
+ * Table B-5
+ */
+enum {
+ WM_TMRC_OP2 = 0,
+ WM_TMRC_CPNUM = 1,
+
+ WM_TMOVMSK_OP2 = 1,
+ WM_TMOVMSK_CPNUM = 0,
+
+ WM_TANDC_OP2 = 1,
+ WM_TANDC_CPNUM = 1,
+
+ WM_TORC_OP2 = 2,
+ WM_TORC_CPNUM = 1,
+
+ WM_TEXTRC_OP2 = 3,
+ WM_TEXTRC_CPNUM = 1,
+
+ WM_TEXTRM_OP2 = 3,
+ WM_TEXTRM_CPNUM = 0
+};
+
+
+/*
+ * TANDC<B,H,W>{Cond} R15
+ * Performs AND across the fields of the SIMD PSR register (wCASF) and sends the result
+ * to CPSR; can be performed after a Byte, Half-word or Word operation that sets the flags.
+ * NOTE: R15 is omitted from the macro declaration;
+ */
+#define DEF_WM_TANDC_CC(_q, _cc) WM_DEF_CC((_cc)) + ((_q) << 0x16) + 0xE13F130
+
+#define _WM_TANDC_CC(_q, _cc) WM_ASM(DEF_WM_TANDC_CC(_q, _cc))
+#define ARM_WM_TANDC_CC(_p, _q, _cc) WM_EMIT(_p, DEF_WM_TANDC_CC(_q, _cc))
+
+/* inline assembly */
+#define _WM_TANDC(_q) _WM_TANDC_CC((_q), WM_CC_AL)
+#define _WM_TANDCB() _WM_TANDC(WM_B)
+#define _WM_TANDCH() _WM_TANDC(WM_H)
+#define _WM_TANDCD() _WM_TANDC(WM_D)
+
+/* codegen */
+#define ARM_WM_TANDC(_p, _q) ARM_WM_TANDC_CC((_p), (_q), WM_CC_AL)
+#define ARM_WM_TANDCB(_p) ARM_WM_TANDC(_p, WM_B)
+#define ARM_WM_TANDCH(_p) ARM_WM_TANDC(_p, WM_H)
+#define ARM_WM_TANDCD(_p) ARM_WM_TANDC(_p, WM_D)
+
+
+/*
+ * TBCST<B,H,W>{Cond} wRd, Rn
+ * Broadcasts a value from the ARM Source reg (Rn) to every SIMD position
+ * in the WMMX Destination reg (wRd).
+ */
+#define DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn) \
+ WM_DEF_CC((_cc)) + ((_q) << 6) + ((_wrd) << 16) + ((_rn) << 12) + 0xE200010
+
+#define _WM_TBCST_CC(_q, _cc, _wrd, _rn) WM_ASM(DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+#define ARM_WM_TBCST_CC(_p, _q, _cc, _wrd, _rn) WM_EMIT(_p, DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+
+/* inline */
+#define _WM_TBCST(_q, _wrd, _rn) _WM_TBCST_CC(_q, WM_CC_AL, _wrd, _rn)
+#define _WM_TBCSTB(_wrd, _rn) _WM_TBCST(WM_B, _wrd, _rn)
+#define _WM_TBCSTH(_wrd, _rn) _WM_TBCST(WM_H, _wrd, _rn)
+#define _WM_TBCSTD(_wrd, _rn) _WM_TBCST(WM_D, _wrd, _rn)
+
+/* codegen */
+#define ARM_WM_TBCST(_p, _q, _wrd, _rn) ARM_WM_TBCST_CC(_p, _q, WM_CC_AL, _wrd, _rn)
+#define ARM_WM_TBCSTB(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_B, _wrd, _rn)
+#define ARM_WM_TBCSTH(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_H, _wrd, _rn)
+#define ARM_WM_TBCSTD(_p, _wrd, _rn) ARM_WM_TBCST(_p, WM_D, _wrd, _rn)
+
+
+#endif /* __WMMX_H__ */
diff --git a/src/arch/arm/cmp_macros.th b/src/arch/arm/cmp_macros.th
new file mode 100644
index 0000000..cb2639d
--- /dev/null
+++ b/src/arch/arm/cmp_macros.th
@@ -0,0 +1,56 @@
+/* PSR := <Op> Rn, (imm8 ROR 2*rot) */
+#define ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rn, imm8) \
+ _<Op>_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, Rm */
+#define ARM_<Op>_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, 0, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, 0, rn, rm, cond)
+#define _<Op>_REG_REG(rn, rm) \
+ _<Op>_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, (Rm <shift_type> imm8) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpi_macros.th b/src/arch/arm/dpi_macros.th
new file mode 100644
index 0000000..be43d1f
--- /dev/null
+++ b/src/arch/arm/dpi_macros.th
@@ -0,0 +1,112 @@
+/* -- <Op> -- */
+
+/* Rd := Rn <Op> (imm8 ROR rot) ; rot is power of 2 */
+#define ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>S_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rd, rn, imm8) \
+ _<Op>_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _<Op>S_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(rd, rn, imm8) \
+ _<Op>S_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>_REG_REG(rd, rn, rm) \
+ _<Op>_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _<Op>S_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>S_REG_REG(rd, rn, rm) \
+ _<Op>S_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> imm_shift) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpiops.sh b/src/arch/arm/dpiops.sh
new file mode 100755
index 0000000..d3b93ff
--- /dev/null
+++ b/src/arch/arm/dpiops.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC"
+CMP_OPCODES="TST TEQ CMP CMN"
+MOV_OPCODES="MOV MVN"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
+
+
+
+printf '/* Macros for DPI ops, auto-generated from template */\n\n'
+
+printf '\n/* mov/mvn */\n\n'
+gen "$MOV_OPCODES" mov_macros
+
+printf '\n/* DPIs, arithmetic and logical */\n\n'
+gen "$OPCODES" dpi_macros
+
+printf '\n\n\n'
+
+printf '\n/* DPIs, comparison */\n\n'
+gen "$CMP_OPCODES" cmp_macros
+
+printf '\n/* end generated */\n\n'
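The script writes the generated macros to stdout, so the build redirects them into a header, along the lines of (the exact output filename is the Makefile's choice, assumed here):

    ./dpiops.sh > arm_dpimacros.h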
diff --git a/src/arch/arm/mov_macros.th b/src/arch/arm/mov_macros.th
new file mode 100644
index 0000000..6bac290
--- /dev/null
+++ b/src/arch/arm/mov_macros.th
@@ -0,0 +1,121 @@
+/* Rd := imm8 ROR rot */
+#define ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>_REG_IMM(reg, imm8, rot) \
+ _<Op>_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>S_REG_IMM(reg, imm8, rot) \
+ _<Op>S_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>_REG_IMM8(reg, imm8) \
+ _<Op>_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(reg, imm8) \
+ _<Op>S_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>_REG_REG(rd, rm) \
+ _<Op>_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>S_REG_REG(rd, rm) \
+ _<Op>S_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
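A minimal usage sketch for the generated mov/mvn macros (assuming arminstr_t and the ARMREG_* constants from arm-codegen.h are in scope):

    arminstr_t buf [2];
    arminstr_t *p = buf;

    ARM_MOV_REG_REG (p, ARMREG_R0, ARMREG_R1);   /* mov r0, r1 */
    ARM_MVNS_REG_IMM8 (p, ARMREG_R2, 0xFF);      /* mvns r2, #0xff */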
diff --git a/src/arch/arm/tramp.c b/src/arch/arm/tramp.c
new file mode 100644
index 0000000..f736c7a
--- /dev/null
+++ b/src/arch/arm/tramp.c
@@ -0,0 +1,710 @@
+/*
+ * Create trampolines to invoke arbitrary functions.
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ *
+ * Contributions by Malte Hildingson
+ */
+
+#include "arm-codegen.h"
+#include "arm-dis.h"
+
+#if defined(_WIN32_WCE) || defined (UNDER_CE)
+# include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#if !defined(PLATFORM_MACOSX)
+#include <errno.h>
+
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+
+
+#if 0
+# define ARM_DUMP_DISASM 1
+#endif
+
+/* prototypes for private functions (to avoid compiler warnings) */
+void flush_icache (void);
+void* alloc_code_buff (int num_instr);
+
+
+
+/*
+ * The resulting function takes the form:
+ * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
+ * NOTE: all args passed in ARM registers (A1-A4),
+ * then copied to R4-R7 (see definitions below).
+ */
+
+#define REG_FUNC_ADDR ARMREG_R4
+#define REG_RETVAL ARMREG_R5
+#define REG_THIS ARMREG_R6
+#define REG_ARGP ARMREG_R7
+
+
+#define ARG_SIZE sizeof(stackval)
+
+
+
+
+void flush_icache (void)
+{
+#if defined(_WIN32)
+ FlushInstructionCache(GetCurrentProcess(), NULL, 0);
+#else
+# if 0
+ asm ("mov r0, r0");
+ asm ("mov r0, #0");
+ asm ("mcr p15, 0, r0, c7, c7, 0");
+# else
+ /* TODO: use (movnv pc, rx) method */
+# endif
+#endif
+}
+
+
+void* alloc_code_buff (int num_instr)
+{
+ void* code_buff;
+ int code_size = num_instr * sizeof(arminstr_t);
+
+#if defined(_WIN32) || defined(UNDER_CE)
+ int old_prot = 0;
+
+ code_buff = malloc(code_size);
+ VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot);
+#else
+ int page_size = sysconf(_SC_PAGESIZE);
+ int new_code_size;
+
+ new_code_size = code_size + page_size - 1;
+ code_buff = malloc(new_code_size);
+ code_buff = (void *) (((int) code_buff + page_size - 1) & ~(page_size - 1));
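+ /* note: rounding up to the page boundary discards the original
+ malloc'd address, so this buffer can never be freed */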
+
+ if (mprotect(code_buff, code_size, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) {
+ g_critical (G_GNUC_PRETTY_FUNCTION
+ ": mprotect error: %s", g_strerror (errno));
+ }
+#endif
+
+ return code_buff;
+}
+
+
+/*
+ * Refer to ARM Procedure Call Standard (APCS) for more info.
+ */
+MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ MonoType* param;
+ MonoPIFunc code_buff;
+ arminstr_t* p;
+ guint32 code_size, stack_size;
+ guint32 simple_type;
+ int i, hasthis, aregs, regc, stack_offs;
+ int this_loaded;
+ guchar reg_alloc [ARM_NUM_ARG_REGS];
+
+ /* pessimistic estimation for prologue/epilogue size */
+ code_size = 16 + 16;
+ /* push/pop work regs */
+ code_size += 2;
+ /* call */
+ code_size += 2;
+ /* handle retval */
+ code_size += 2;
+
+ stack_size = 0;
+ hasthis = sig->hasthis ? 1 : 0;
+
+ aregs = ARM_NUM_ARG_REGS - hasthis;
+
+ for (i = 0, regc = aregs; i < sig->param_count; ++i) {
+ param = sig->params [i];
+
+ /* default: no register allocated for this argument */
+ if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0;
+
+ if (param->byref) {
+ if (regc > 0) {
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ code_size += 2;
+ stack_size += sizeof(gpointer);
+ }
+ } else {
+ simple_type = param->type;
+enum_calc_size:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ /* 64-bit values may need two argument registers */
+ if (regc > 1) {
+ /* fits into registers, two LDRs */
+ code_size += 2;
+ reg_alloc [i] = regc;
+ regc -= 2;
+ } else if (regc > 0) {
+ /* first half fits into register, one LDR */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ /* the rest on the stack, LDR/STR */
+ code_size += 2;
+ stack_size += 4;
+ } else {
+ /* stack arg, 4 instrs - 2x(LDR/STR) */
+ code_size += 4;
+ stack_size += 2 * 4;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+
+ if (mono_class_value_size(param->data.klass, NULL) != 4) {
+ g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL));
+ }
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ default :
+ break;
+ }
+ }
+ }
+
+ code_buff = (MonoPIFunc)alloc_code_buff(code_size);
+ p = (arminstr_t*)code_buff;
+
+ /* prologue */
+ p = arm_emit_lean_prologue(p, stack_size,
+ /* save workset (r4-r7) */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+
+ /* copy args into workset */
+ /* callme - always present */
+ ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1);
+ /* retval */
+ if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) {
+ ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2);
+ }
+ /* this_obj */
+ if (sig->hasthis) {
+ this_loaded = 0;
+ if (stack_size == 0) {
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3);
+ this_loaded = 1;
+ } else {
+ ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3);
+ }
+ }
+ /* args */
+ if (sig->param_count != 0) {
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4);
+ }
+
+ stack_offs = stack_size;
+
+ /* handle arguments */
+ /* in reverse order, so that r0 (arg1) stays free for memory transfers */
+ for (i = sig->param_count; --i >= 0;) {
+ param = sig->params [i];
+ if (param->byref) {
+ if (i < aregs && reg_alloc[i] > 0) {
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ } else {
+ simple_type = param->type;
+enum_marshal:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (i < aregs && reg_alloc [i] > 0) {
+ /* pass in register */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (i < aregs && reg_alloc [i] > 0) {
+ if (reg_alloc [i] > 1) {
+ /* pass in registers */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ }
+ } else {
+ /* two words transferred on the stack */
+ stack_offs -= 2*sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ /* it's an enum value, proceed based on its base type */
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_marshal;
+ } else {
+ if (i < aregs && reg_alloc[i] > 0) {
+ int vtreg = ARMREG_A1 + hasthis +
+ (aregs - reg_alloc[i]);
+ ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, vtreg, vtreg, 0);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (sig->hasthis && !this_loaded) {
+ /* [this] always passed in A1, regardless of sig->call_convention */
+ ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS);
+ }
+
+ /* call [func] */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR);
+
+ /* handle retval */
+ if (sig->ret->byref || string_ctor) {
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ } else {
+ simple_type = sig->ret->type;
+enum_retvalue:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 32-bit integer and integer-equivalent return value
+ * is returned in R0.
+ * Single-precision floating-point values are returned in R0.
+ */
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 64-bit integer is returned in R0 and R1.
+ * Double-precision floating-point values are returned in R0 and R1.
+ */
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simple_type = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ break;
+ }
+ }
+
+ p = arm_emit_std_epilogue(p, stack_size,
+ /* restore R4-R7 */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ return code_buff;
+}
+
+
+
+#define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member)
+
+
+
+/*
+ * Returns a pointer to a native function that can be used to
+ * call the specified method.
+ * The function created will receive the arguments according
+ * to the call convention specified in the method.
+ * This function works by creating a MonoInvocation structure,
+ * filling the fields in and calling ves_exec_method on it.
+ * Still need to figure out how to handle the exception stuff
+ * across the managed/unmanaged boundary.
+ */
+void* mono_arch_create_method_pointer (MonoMethod* method)
+{
+ MonoMethodSignature* sig;
+ guchar* p, * p_method, * p_stackval_from_data, * p_exec;
+ void* code_buff;
+ int i, stack_size, arg_pos, arg_add, stackval_pos, offs;
+ int areg, reg_args, shift, pos;
+ MonoJitInfo *ji;
+
+ code_buff = alloc_code_buff(128);
+ p = (guchar*)code_buff;
+
+ sig = method->signature;
+
+ ARM_B(p, 3);
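+ /* the branch above skips the 4 embedded data words below
+ (the encoded offset is relative to PC+8) */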
+
+ /* embed magic number followed by method pointer */
+ *p++ = 'M';
+ *p++ = 'o';
+ *p++ = 'n';
+ *p++ = 'o';
+ /* method ptr */
+ *(void**)p = method;
+ p_method = p;
+ p += 4;
+
+ /* call table */
+ *(void**)p = stackval_from_data;
+ p_stackval_from_data = p;
+ p += 4;
+ *(void**)p = ves_exec_method;
+ p_exec = p;
+ p += 4;
+
+ stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t);
+
+ /* prologue */
+ p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ /* R7 - ptr to stack args */
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP);
+
+ /*
+ * Initialize MonoInvocation fields, first the ones known now.
+ */
+ ARM_MOV_REG_IMM8(p, ARMREG_R4, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent));
+
+ /* Set the method pointer. */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2));
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method));
+
+ if (sig->hasthis) {
+ /* [this] in A1 */
+ ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj));
+ } else {
+ /* else set minv.obj to NULL */
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj));
+ }
+
+ /* copy args from registers to stack */
+ areg = ARMREG_A1 + sig->hasthis;
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ arg_add = 0;
+ for (i = 0; i < sig->param_count; ++i) {
+ if (areg >= ARM_NUM_ARG_REGS) break;
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos);
+ ++areg;
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (areg >= ARM_NUM_ARG_REGS) {
+ /* load second half of 64-bit arg */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0);
+ ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t));
+ arg_add = sizeof(armword_t);
+ } else {
+ /* second half is already the register */
+ ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t));
+ ++areg;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ arg_pos += 2 * sizeof(armword_t);
+ }
+ /* number of args passed in registers */
+ reg_args = i;
+
+
+
+ /*
+ * Calc and save stack args ptr,
+ * args follow MonoInvocation struct on the stack.
+ */
+ ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation));
+ ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args));
+
+ /* convert method args to stackvals */
+ arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+ stackval_pos = sizeof(MonoInvocation);
+ for (i = 0; i < sig->param_count; ++i) {
+ if (i < reg_args) {
+ ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos);
+ arg_pos += 2 * sizeof(armword_t);
+ } else {
+ if (arg_pos < 0) arg_pos = 0;
+ pos = arg_pos + arg_add;
+ if (pos <= 0xFF) {
+ ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos);
+ } else {
+ if (is_arm_const((armword_t)pos)) {
+ shift = calc_arm_mov_const_shift((armword_t)pos);
+ ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1);
+ } else {
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos);
+ ARM_ADD_REG_REG(p, ARMREG_A3, ARMREG_R7, ARMREG_R6);
+ }
+ }
+ arg_pos += sizeof(armword_t);
+ if (!sig->params[i]->byref) {
+ switch (sig->params[i]->type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ arg_pos += sizeof(armword_t);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ /* assert */
+ default:
+ break;
+ }
+ }
+ }
+
+ /* A2 = result */
+ if (stackval_pos <= 0xFF) {
+ ARM_ADD_REG_IMM8(p, ARMREG_A2, ARMREG_SP, stackval_pos);
+ } else {
+ if (is_arm_const((armword_t)stackval_pos)) {
+ shift = calc_arm_mov_const_shift((armword_t)stackval_pos);
+ ARM_ADD_REG_IMM(p, ARMREG_A2, ARMREG_SP, stackval_pos >> ((32 - shift) & 31), shift >> 1);
+ } else {
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)stackval_pos);
+ ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_SP, ARMREG_R6);
+ }
+ }
+
+ /* A1 = type */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_A1, (armword_t)sig->params [i]);
+
+ stackval_pos += ARG_SIZE;
+
+ offs = -(p + 2*sizeof(arminstr_t) - p_stackval_from_data);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call stackval_from_data */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+ }
+
+ /* store retval ptr */
+ p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R4, (armword_t)stackval_pos);
+ ARM_ADD_REG_REG(p, ARMREG_R5, ARMREG_SP, ARMREG_R4);
+ ARM_STR_IMM(p, ARMREG_R5, ARMREG_SP, MINV_OFFS(retval));
+
+ /*
+ * Call the method.
+ */
+ /* A1 = MonoInvocation ptr */
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_SP);
+ offs = -(p + 2*sizeof(arminstr_t) - p_exec);
+ /* load function address */
+ ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+ /* call ves_exec */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+
+
+ /*
+ * Move retval into reg.
+ */
+ if (sig->ret->byref) {
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ } else {
+ switch (sig->ret->type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_LDRB_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+ ARM_LDR_IMM(p, ARMREG_R1, ARMREG_R5, 4);
+ break;
+ case MONO_TYPE_VOID:
+ default:
+ break;
+ }
+ }
+
+
+ p = (guchar*)arm_emit_std_epilogue((arminstr_t*)p, stack_size,
+ (1 << ARMREG_R4) |
+ (1 << ARMREG_R5) |
+ (1 << ARMREG_R6) |
+ (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ ji = g_new0(MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff);
+ ji->code_start = (gpointer) code_buff;
+
+ mono_jit_info_table_add(mono_get_root_domain (), ji);
+
+ return code_buff;
+}
+
+
+/*
+ * mono_create_method_pointer () will insert a pointer to the MonoMethod
+ * so that the interp can easily get at the data: this function will retrieve
+ * the method from the code stream.
+ */
+MonoMethod* mono_method_pointer_get (void* code)
+{
+ unsigned char* c = code;
+ /* check out magic number that follows unconditional branch */
+ if (c[4] == 'M' &&
+ c[5] == 'o' &&
+ c[6] == 'n' &&
+ c[7] == 'o') return ((MonoMethod**)code)[2];
+ return NULL;
+}
+#endif
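The trampoline produced above is called through the signature documented at the top of the file ("callme, retval, this_obj, arguments"). A hypothetical interpreter-side caller might look like this; the function f and the stackval field layout are illustrative assumptions (see mono/interpreter/interp.h for the real definitions):

    MonoPIFunc tramp = mono_arch_create_trampoline (sig, FALSE);
    stackval args [1];
    gint32 ret;

    args [0].data.i = 42;                       /* assumed stackval layout */
    tramp ((void (*)()) f, &ret, NULL, args);   /* callme, retval, this_obj, arguments */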
diff --git a/src/arch/arm/vfp_macros.th b/src/arch/arm/vfp_macros.th
new file mode 100644
index 0000000..cca67dc
--- /dev/null
+++ b/src/arch/arm/vfp_macros.th
@@ -0,0 +1,15 @@
+/* -- <Op> -- */
+
+
+/* Fd := Fn <Op> Fm */
+#define ARM_VFP_<Op>D_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>D(p, rd, rn, rm) \
+ ARM_VFP_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#define ARM_VFP_<Op>S_COND(p, rd, rn, rm, cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>S(p, rd, rn, rm) \
+ ARM_VFP_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
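Expanded for <Op> = ADD, the dyadic template yields (a sketch of what vfpops.sh generates):

    /* Fd := Fn ADD Fm, double precision */
    #define ARM_VFP_ADDD_COND(p, rd, rn, rm, cond) \
        ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_ADD,rd,rn,rm))
    #define ARM_VFP_ADDD(p, rd, rn, rm) \
        ARM_VFP_ADDD_COND(p, rd, rn, rm, ARMCOND_AL)

so ARM_VFP_ADDD (p, 0, 1, 2) emits the pre-UAL equivalent of "faddd d0, d1, d2".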
diff --git a/src/arch/arm/vfpm_macros.th b/src/arch/arm/vfpm_macros.th
new file mode 100644
index 0000000..25ad721
--- /dev/null
+++ b/src/arch/arm/vfpm_macros.th
@@ -0,0 +1,14 @@
+/* -- <Op> -- */
+
+
+/* Fd := <Op> Fm */
+
+#define ARM_<Op>D_COND(p,dreg,sreg,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>D(p,dreg,sreg) ARM_<Op>D_COND(p,dreg,sreg,ARMCOND_AL)
+
+#define ARM_<Op>S_COND(p,dreg,sreg,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>S(p,dreg,sreg) ARM_<Op>S_COND(p,dreg,sreg,ARMCOND_AL)
+
+
diff --git a/src/arch/arm/vfpops.sh b/src/arch/arm/vfpops.sh
new file mode 100755
index 0000000..bed4a9c
--- /dev/null
+++ b/src/arch/arm/vfpops.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+DYADIC="ADD SUB MUL NMUL DIV"
+MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" "$2.th"
+ done
+}
+
+printf '/* Macros for VFP ops, auto-generated from template */\n\n'
+
+printf '\n/* dyadic */\n\n'
+gen "$DYADIC" vfp_macros
+
+printf '\n/* monadic */\n\n'
+gen "$MONADIC" vfpm_macros
+
+printf '\n\n\n'
+
+printf '\n/* end generated */\n\n'
diff --git a/src/arch/arm64/.gitignore b/src/arch/arm64/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/src/arch/arm64/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/src/arch/arm64/Makefile.am b/src/arch/arm64/Makefile.am
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/arch/arm64/Makefile.am
diff --git a/src/arch/arm64/arm64-codegen.h b/src/arch/arm64/arm64-codegen.h
new file mode 100644
index 0000000..259ff96
--- /dev/null
+++ b/src/arch/arm64/arm64-codegen.h
@@ -0,0 +1,3 @@
+#include "../../../../mono-extensions/mono/arch/arm64/arm64-codegen.h"
+
+
diff --git a/src/arch/ia64/.gitignore b/src/arch/ia64/.gitignore
new file mode 100644
index 0000000..b336cc7
--- /dev/null
+++ b/src/arch/ia64/.gitignore
@@ -0,0 +1,2 @@
+/Makefile
+/Makefile.in
diff --git a/src/arch/ia64/Makefile.am b/src/arch/ia64/Makefile.am
new file mode 100644
index 0000000..e03ea47
--- /dev/null
+++ b/src/arch/ia64/Makefile.am
@@ -0,0 +1,3 @@
+EXTRA_DIST = ia64-codegen.h
+
+
diff --git a/src/arch/ia64/codegen.c b/src/arch/ia64/codegen.c
new file mode 100644
index 0000000..97e1aef
--- /dev/null
+++ b/src/arch/ia64/codegen.c
@@ -0,0 +1,861 @@
+/*
+ * codegen.c: Tests for the IA64 code generation macros
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include <ctype.h>
+
+#define IA64_SIMPLE_EMIT_BUNDLE
+
+#include <mono/arch/ia64/ia64-codegen.h>
+
+void
+mono_disassemble_code (guint8 *code, int size, char *id)
+{
+ int i;
+ FILE *ofd;
+ const char *tmp = g_get_tmp_dir ();
+ const char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS");
+ char *as_file;
+ char *o_file;
+ char *cmd;
+
+ as_file = g_strdup_printf ("%s/test.s", tmp);
+
+ if (!(ofd = fopen (as_file, "w")))
+ g_assert_not_reached ();
+
+ for (i = 0; id [i]; ++i) {
+ if (!isalnum (id [i]))
+ fprintf (ofd, "_");
+ else
+ fprintf (ofd, "%c", id [i]);
+ }
+ fprintf (ofd, ":\n");
+
+ for (i = 0; i < size; ++i)
+ fprintf (ofd, ".byte %d\n", (unsigned int) code [i]);
+
+ fclose (ofd);
+
+#ifdef __ia64__
+#define DIS_CMD "objdump -d"
+#define AS_CMD "as"
+#else
+#define DIS_CMD "ia64-linux-gnu-objdump -d"
+#define AS_CMD "ia64-linux-gnu-as"
+#endif
+
+ o_file = g_strdup_printf ("%s/test.o", tmp);
+ cmd = g_strdup_printf (AS_CMD " %s -o %s", as_file, o_file);
+ system (cmd);
+ g_free (cmd);
+ if (!objdump_args)
+ objdump_args = "";
+
+ cmd = g_strdup_printf (DIS_CMD " %s %s", objdump_args, o_file);
+ system (cmd);
+ g_free (cmd);
+
+ g_free (o_file);
+ g_free (as_file);
+}
+
+int
+main ()
+{
+ Ia64CodegenState code;
+
+ guint8 *buf = g_malloc0 (40960);
+
+ ia64_codegen_init (code, buf);
+
+ ia64_add (code, 1, 2, 3);
+ ia64_add1 (code, 1, 2, 3);
+ ia64_sub (code, 1, 2, 3);
+ ia64_sub1 (code, 1, 2, 3);
+ ia64_addp4 (code, 1, 2, 3);
+ ia64_and (code, 1, 2, 3);
+ ia64_andcm (code, 1, 2, 3);
+ ia64_or (code, 1, 2, 3);
+ ia64_xor (code, 1, 2, 3);
+ ia64_shladd (code, 1, 2, 3, 4);
+ ia64_shladdp4 (code, 1, 2, 3, 4);
+ ia64_sub_imm (code, 1, 0x7f, 2);
+ ia64_sub_imm (code, 1, -1, 2);
+ ia64_and_imm (code, 1, -128, 2);
+ ia64_andcm_imm (code, 1, -128, 2);
+ ia64_or_imm (code, 1, -128, 2);
+ ia64_xor_imm (code, 1, -128, 2);
+ ia64_adds_imm (code, 1, 8191, 2);
+ ia64_adds_imm (code, 1, -8192, 2);
+ ia64_adds_imm (code, 1, 1234, 2);
+ ia64_adds_imm (code, 1, -1234, 2);
+ ia64_addp4_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 1234, 2);
+ ia64_addl_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 2097151, 2);
+ ia64_addl_imm (code, 1, -2097152, 2);
+
+ ia64_cmp_lt (code, 1, 2, 1, 2);
+ ia64_cmp_ltu (code, 1, 2, 1, 2);
+ ia64_cmp_eq (code, 1, 2, 1, 2);
+ ia64_cmp_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp4_lt (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu (code, 1, 2, 1, 2);
+ ia64_cmp4_eq (code, 1, 2, 1, 2);
+ ia64_cmp4_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_le_and (code, 1, 2, 0, 2);
+ ia64_cmp_le_or (code, 1, 2, 0, 2);
+ ia64_cmp_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp4_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_le_and (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, 127, 2);
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp4_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_padd1 (code, 1, 2, 3);
+ ia64_padd2 (code, 1, 2, 3);
+ ia64_padd4 (code, 1, 2, 3);
+ ia64_padd1_sss (code, 1, 2, 3);
+ ia64_padd2_sss (code, 1, 2, 3);
+ ia64_padd1_uuu (code, 1, 2, 3);
+ ia64_padd2_uuu (code, 1, 2, 3);
+ ia64_padd1_uus (code, 1, 2, 3);
+ ia64_padd2_uus (code, 1, 2, 3);
+
+ ia64_psub1 (code, 1, 2, 3);
+ ia64_psub2 (code, 1, 2, 3);
+ ia64_psub4 (code, 1, 2, 3);
+ ia64_psub1_sss (code, 1, 2, 3);
+ ia64_psub2_sss (code, 1, 2, 3);
+ ia64_psub1_uuu (code, 1, 2, 3);
+ ia64_psub2_uuu (code, 1, 2, 3);
+ ia64_psub1_uus (code, 1, 2, 3);
+ ia64_psub2_uus (code, 1, 2, 3);
+
+ ia64_pavg1 (code, 1, 2, 3);
+ ia64_pavg2 (code, 1, 2, 3);
+ ia64_pavg1_raz (code, 1, 2, 3);
+ ia64_pavg2_raz (code, 1, 2, 3);
+ ia64_pavgsub1 (code, 1, 2, 3);
+ ia64_pavgsub2 (code, 1, 2, 3);
+ ia64_pcmp1_eq (code, 1, 2, 3);
+ ia64_pcmp2_eq (code, 1, 2, 3);
+ ia64_pcmp4_eq (code, 1, 2, 3);
+ ia64_pcmp1_gt (code, 1, 2, 3);
+ ia64_pcmp2_gt (code, 1, 2, 3);
+ ia64_pcmp4_gt (code, 1, 2, 3);
+
+ ia64_pshladd2 (code, 1, 2, 3, 4);
+ ia64_pshradd2 (code, 1, 2, 3, 4);
+
+ ia64_pmpyshr2 (code, 1, 2, 3, 0);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 0);
+ ia64_pmpyshr2 (code, 1, 2, 3, 7);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 7);
+ ia64_pmpyshr2 (code, 1, 2, 3, 15);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 15);
+ ia64_pmpyshr2 (code, 1, 2, 3, 16);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 16);
+
+ ia64_pmpy2_r (code, 1, 2, 3);
+ ia64_pmpy2_l (code, 1, 2, 3);
+ ia64_mix1_r (code, 1, 2, 3);
+ ia64_mix2_r (code, 1, 2, 3);
+ ia64_mix4_r (code, 1, 2, 3);
+ ia64_mix1_l (code, 1, 2, 3);
+ ia64_mix2_l (code, 1, 2, 3);
+ ia64_mix4_l (code, 1, 2, 3);
+ ia64_pack2_uss (code, 1, 2, 3);
+ ia64_pack2_sss (code, 1, 2, 3);
+ ia64_pack4_sss (code, 1, 2, 3);
+ ia64_unpack1_h (code, 1, 2, 3);
+ ia64_unpack2_h (code, 1, 2, 3);
+ ia64_unpack4_h (code, 1, 2, 3);
+ ia64_unpack1_l (code, 1, 2, 3);
+ ia64_unpack2_l (code, 1, 2, 3);
+ ia64_unpack4_l (code, 1, 2, 3);
+ ia64_pmin1_u (code, 1, 2, 3);
+ ia64_pmax1_u (code, 1, 2, 3);
+ ia64_pmin2 (code, 1, 2, 3);
+ ia64_pmax2 (code, 1, 2, 3);
+ ia64_psad1 (code, 1, 2, 3);
+
+ ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_MIX);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_ALT);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_REV);
+
+ ia64_mux2 (code, 1, 2, 0x8d);
+
+ ia64_pshr2 (code, 1, 2, 3);
+ ia64_pshr4 (code, 1, 2, 3);
+ ia64_shr (code, 1, 2, 3);
+ ia64_pshr2_u (code, 1, 2, 3);
+ ia64_pshr4_u (code, 1, 2, 3);
+ ia64_shr_u (code, 1, 2, 3);
+
+ ia64_pshr2_imm (code, 1, 2, 20);
+ ia64_pshr4_imm (code, 1, 2, 20);
+ ia64_pshr2_u_imm (code, 1, 2, 20);
+ ia64_pshr4_u_imm (code, 1, 2, 20);
+
+ ia64_pshl2 (code, 1, 2, 3);
+ ia64_pshl4 (code, 1, 2, 3);
+ ia64_shl (code, 1, 2, 3);
+
+ ia64_pshl2_imm (code, 1, 2, 20);
+ ia64_pshl4_imm (code, 1, 2, 20);
+
+ ia64_popcnt (code, 1, 2);
+
+ ia64_shrp (code, 1, 2, 3, 62);
+
+ ia64_extr_u (code, 1, 2, 62, 61);
+ ia64_extr (code, 1, 2, 62, 61);
+
+ ia64_dep_z (code, 1, 2, 62, 61);
+
+ ia64_dep_z_imm (code, 1, 127, 62, 61);
+ ia64_dep_z_imm (code, 1, -128, 62, 61);
+ ia64_dep_imm (code, 1, 0, 2, 62, 61);
+ ia64_dep_imm (code, 1, -1, 2, 62, 61);
+ ia64_dep (code, 1, 2, 3, 10, 15);
+
+ ia64_tbit_z (code, 1, 2, 3, 0);
+
+ ia64_tbit_z (code, 1, 2, 3, 63);
+ ia64_tbit_z_unc (code, 1, 2, 3, 63);
+ ia64_tbit_z_and (code, 1, 2, 3, 63);
+ ia64_tbit_nz_and (code, 1, 2, 3, 63);
+ ia64_tbit_z_or (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or (code, 1, 2, 3, 63);
+ ia64_tbit_z_or_andcm (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63);
+
+ ia64_tnat_z (code, 1, 2, 3);
+ ia64_tnat_z_unc (code, 1, 2, 3);
+ ia64_tnat_z_and (code, 1, 2, 3);
+ ia64_tnat_nz_and (code, 1, 2, 3);
+ ia64_tnat_z_or (code, 1, 2, 3);
+ ia64_tnat_nz_or (code, 1, 2, 3);
+ ia64_tnat_z_or_andcm (code, 1, 2, 3);
+ ia64_tnat_nz_or_andcm (code, 1, 2, 3);
+
+ ia64_nop_i (code, 0x1234);
+ ia64_hint_i (code, 0x1234);
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_chk_s_i (code, 1, 0);
+ ia64_chk_s_i (code, 1, -1);
+ ia64_chk_s_i (code, 1, 1);
+
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP);
+ ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+
+ ia64_mov_from_br (code, 1, 1);
+
+ ia64_mov_to_pred (code, 1, 0xfe);
+
+ ia64_mov_to_pred_rot_imm (code, 0xff0000);
+
+ ia64_mov_from_ip (code, 1);
+ ia64_mov_from_pred (code, 1);
+
+ ia64_mov_to_ar_i (code, 1, 1);
+
+ ia64_mov_to_ar_imm_i (code, 1, 127);
+
+ ia64_mov_from_ar_i (code, 1, 1);
+
+ ia64_zxt1 (code, 1, 2);
+ ia64_zxt2 (code, 1, 2);
+ ia64_zxt4 (code, 1, 2);
+ ia64_sxt1 (code, 1, 2);
+ ia64_sxt2 (code, 1, 2);
+ ia64_sxt4 (code, 1, 2);
+
+ ia64_czx1_l (code, 1, 2);
+ ia64_czx2_l (code, 1, 2);
+ ia64_czx1_r (code, 1, 2);
+ ia64_czx2_r (code, 1, 2);
+
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA);
+
+ ia64_ld1_hint (code, 1, 2, 0);
+ ia64_ld2_hint (code, 1, 2, 0);
+ ia64_ld4_hint (code, 1, 2, 0);
+ ia64_ld8_hint (code, 1, 2, 0);
+
+ ia64_ld1_s_hint (code, 1, 2, 0);
+ ia64_ld2_s_hint (code, 1, 2, 0);
+ ia64_ld4_s_hint (code, 1, 2, 0);
+ ia64_ld8_s_hint (code, 1, 2, 0);
+
+ ia64_ld1_a_hint (code, 1, 2, 0);
+ ia64_ld2_a_hint (code, 1, 2, 0);
+ ia64_ld4_a_hint (code, 1, 2, 0);
+ ia64_ld8_a_hint (code, 1, 2, 0);
+
+ ia64_ld1_sa_hint (code, 1, 2, 0);
+ ia64_ld2_sa_hint (code, 1, 2, 0);
+ ia64_ld4_sa_hint (code, 1, 2, 0);
+ ia64_ld8_sa_hint (code, 1, 2, 0);
+
+ ia64_ld1_bias_hint (code, 1, 2, 0);
+ ia64_ld2_bias_hint (code, 1, 2, 0);
+ ia64_ld4_bias_hint (code, 1, 2, 0);
+ ia64_ld8_bias_hint (code, 1, 2, 0);
+
+ ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE);
+
+ ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE);
+ ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_ldfs_hint (code, 1, 2, 0);
+ ia64_ldfd_hint (code, 1, 2, 0);
+ ia64_ldf8_hint (code, 1, 2, 0);
+ ia64_ldfe_hint (code, 1, 2, 0);
+
+ ia64_ldfs_s_hint (code, 1, 2, 0);
+ ia64_ldfd_s_hint (code, 1, 2, 0);
+ ia64_ldf8_s_hint (code, 1, 2, 0);
+ ia64_ldfe_s_hint (code, 1, 2, 0);
+
+ ia64_ldfs_a_hint (code, 1, 2, 0);
+ ia64_ldfd_a_hint (code, 1, 2, 0);
+ ia64_ldf8_a_hint (code, 1, 2, 0);
+ ia64_ldfe_a_hint (code, 1, 2, 0);
+
+ ia64_ldfs_sa_hint (code, 1, 2, 0);
+ ia64_ldfd_sa_hint (code, 1, 2, 0);
+ ia64_ldf8_sa_hint (code, 1, 2, 0);
+ ia64_ldfe_sa_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfd_c_clr_hint (code, 1, 2, 0);
+ ia64_ldf8_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfe_c_clr_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfd_c_nc_hint (code, 1, 2, 0);
+ ia64_ldf8_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfe_c_nc_hint (code, 1, 2, 0);
+
+ ia64_ldf_fill_hint (code, 1, 2, 0);
+
+ ia64_ldfs_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stfs_hint (code, 1, 2, 0);
+ ia64_stfd_hint (code, 1, 2, 0);
+ ia64_stf8_hint (code, 1, 2, 0);
+ ia64_stfe_hint (code, 1, 2, 0);
+
+ ia64_stf_spill_hint (code, 1, 2, 0);
+
+ ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfps_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_lfetch_hint (code, 1, 0);
+ ia64_lfetch_excl_hint (code, 1, 0);
+ ia64_lfetch_fault_hint (code, 1, 0);
+ ia64_lfetch_fault_excl_hint (code, 1, 0);
+
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA);
+
+ ia64_lfetch_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_excl_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0);
+
+ ia64_lfetch_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0);
+
+ ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0);
+ ia64_xchg1_hint (code, 1, 2, 3, 0);
+ ia64_xchg2_hint (code, 1, 2, 3, 0);
+ ia64_xchg4_hint (code, 1, 2, 3, 0);
+ ia64_xchg8_hint (code, 1, 2, 3, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0);
+
+ ia64_setf_sig (code, 1, 2);
+ ia64_setf_exp (code, 1, 2);
+ ia64_setf_s (code, 1, 2);
+ ia64_setf_d (code, 1, 2);
+
+ ia64_getf_sig (code, 1, 2);
+ ia64_getf_exp (code, 1, 2);
+ ia64_getf_s (code, 1, 2);
+ ia64_getf_d (code, 1, 2);
+
+ ia64_chk_s_m (code, 1, 0);
+ ia64_chk_s_m (code, 1, 1);
+ ia64_chk_s_m (code, 1, -1);
+
+ ia64_chk_s_float_m (code, 1, 0);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_nc (code, 1, 1);
+ ia64_chk_a_nc (code, 1, -1);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_clr (code, 1, 0);
+
+ ia64_chk_a_nc_float (code, 1, 0);
+ ia64_chk_a_clr_float (code, 1, 0);
+
+ ia64_invala (code);
+ ia64_fwb (code);
+ ia64_mf (code);
+ ia64_mf_a (code);
+ ia64_srlz_d (code);
+ ia64_srlz_i (code);
+ ia64_sync_i (code);
+
+ ia64_flushrs (code);
+ ia64_loadrs (code);
+
+ ia64_invala_e (code, 1);
+ ia64_invala_e_float (code, 1);
+
+ ia64_fc (code, 1);
+ ia64_fc_i (code, 1);
+
+ ia64_mov_to_ar_m (code, 1, 1);
+
+ ia64_mov_to_ar_imm_m (code, 1, 127);
+
+ ia64_mov_from_ar_m (code, 1, 1);
+
+ ia64_mov_to_cr (code, 1, 2);
+
+ ia64_mov_from_cr (code, 1, 2);
+
+ ia64_alloc (code, 1, 3, 4, 5, 0);
+ ia64_alloc (code, 1, 3, 4, 5, 8);
+
+ ia64_mov_to_psr_l (code, 1);
+ ia64_mov_to_psr_um (code, 1);
+
+ ia64_mov_from_psr (code, 1);
+ ia64_mov_from_psr_um (code, 1);
+
+ ia64_break_m (code, 0x1234);
+ ia64_nop_m (code, 0x1234);
+ ia64_hint_m (code, 0x1234);
+
+ ia64_br_cond_hint (code, 0, 0, 0, 0);
+ ia64_br_wexit_hint (code, 0, 0, 0, 0);
+ ia64_br_wtop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_cloop_hint (code, 0, 0, 0, 0);
+ ia64_br_cexit_hint (code, 0, 0, 0, 0);
+ ia64_br_ctop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_call_hint (code, 1, 0, 0, 0, 0);
+
+ ia64_br_cond_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ia_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ret_reg_hint (code, 1, 0, 0, 0);
+
+ ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0);
+
+ ia64_cover (code);
+ ia64_clrrrb (code);
+ ia64_clrrrb_pr (code);
+ ia64_rfi (code);
+ ia64_bsw_0 (code);
+ ia64_bsw_1 (code);
+ ia64_epc (code);
+
+ ia64_break_b (code, 0x1234);
+ ia64_nop_b (code, 0x1234);
+ ia64_hint_b (code, 0x1234);
+
+ ia64_break_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl (code, 1, 0x123456789ABCDEF0LL);
+
+ ia64_brl_cond_hint (code, 0, 0, 0, 0);
+ ia64_brl_cond_hint (code, -1, 0, 0, 0);
+
+ ia64_brl_call_hint (code, 1, 0, 0, 0, 0);
+ ia64_brl_call_hint (code, 1, -1, 0, 0, 0);
+
+ ia64_nop_x (code, 0x2123456789ABCDEFULL);
+ ia64_hint_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL);
+
+ /* FLOATING-POINT */
+ ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+
+ ia64_xma_l_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_h_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_hu_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fselect_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff);
+ ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff);
+
+ ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+ ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+
+ ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0);
+
+ ia64_fmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_se_pred (code, 1, 2, 3, 4);
+ ia64_fmix_lr_pred (code, 1, 2, 3, 4);
+ ia64_fmix_r_pred (code, 1, 2, 3, 4);
+ ia64_fmix_l_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_r_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_l_pred (code, 1, 2, 3, 4);
+ ia64_fpack_pred (code, 1, 2, 3, 4);
+ ia64_fswap_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nl_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nr_pred (code, 1, 2, 3, 4);
+ ia64_fand_pred (code, 1, 2, 3, 4);
+ ia64_fandcm_pred (code, 1, 2, 3, 4);
+ ia64_for_pred (code, 1, 2, 3, 4);
+ ia64_fxor_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_se_pred (code, 1, 2, 3, 4);
+
+ ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+
+ ia64_fcvt_xf_pred ((code), 1, 2, 3);
+
+ ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3);
+
+ ia64_fclrf_sf_pred ((code), 1, 3);
+
+ ia64_fchkf_sf_pred ((code), 1, -1, 3);
+
+ ia64_break_f_pred ((code), 1, 0x1234);
+
+ ia64_movl (code, 31, -123456);
+
+ ia64_codegen_close (code);
+
+#if 0
+ /* disassembly */
+ {
+ guint8 *buf = code.buf;
+ int template;
+ guint64 dw1, dw2;
+ guint64 ins1, ins2, ins3;
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_codegen_close (code);
+
+ dw1 = ((guint64*)buf) [0];
+ dw2 = ((guint64*)buf) [1];
+
+ template = ia64_bundle_template (buf);
+ ins1 = ia64_bundle_ins1 (buf);
+ ins2 = ia64_bundle_ins2 (buf);
+ ins3 = ia64_bundle_ins3 (buf);
+
+ code.buf = buf;
+ ia64_emit_bundle_template (&code, template, ins1, ins2, ins3);
+
+ g_assert (dw1 == ((guint64*)buf) [0]);
+ g_assert (dw2 == ((guint64*)buf) [1]);
+ }
+#endif
+
+ mono_disassemble_code (buf, 40960, "code");
+
+ return 0;
+}
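Stripped of the exhaustive opcode coverage, the emission workflow the test relies on is just this (a sketch using definitions from this file and ia64-codegen.h):

    Ia64CodegenState code;
    guint8 *buf = g_malloc0 (4096);

    ia64_codegen_init (code, buf);
    ia64_add (code, 1, 2, 3);        /* r1 = r2 + r3 */
    ia64_codegen_close (code);       /* flush the last, possibly partial, bundle */
    mono_disassemble_code (buf, 4096, "sample");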
diff --git a/src/arch/ia64/ia64-codegen.h b/src/arch/ia64/ia64-codegen.h
new file mode 100644
index 0000000..1793580
--- /dev/null
+++ b/src/arch/ia64/ia64-codegen.h
@@ -0,0 +1,3183 @@
+/*
+ * ia64-codegen.h: Macros for generating ia64 code
+ *
+ * Authors:
+ * Zoltan Varga (vargaz@gmail.com)
+ *
+ * (C) 2005 Novell, Inc.
+ */
+
+#ifndef _IA64_CODEGEN_H_
+#define _IA64_CODEGEN_H_
+
+#include <glib.h>
+#include <string.h>
+
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+
+typedef enum {
+ IA64_INS_TYPE_A,
+ IA64_INS_TYPE_I,
+ IA64_INS_TYPE_M,
+ IA64_INS_TYPE_F,
+ IA64_INS_TYPE_B,
+ IA64_INS_TYPE_LX
+} Ia64InsType;
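+/* note: an LX instruction occupies two bundle slots (L in slot 1, X in slot 2) */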
+
+typedef enum {
+ IA64_TEMPLATE_MII = 0x00,
+ IA64_TEMPLATE_MIIS = 0x01,
+ IA64_TEMPLATE_MISI = 0x02,
+ IA64_TEMPLATE_MISIS = 0x03,
+ IA64_TEMPLATE_MLX = 0x04,
+ IA64_TEMPLATE_MLXS = 0x05,
+ IA64_TEMPLATE_UNUS1 = 0x06,
+ IA64_TEMPLATE_UNUS2 = 0x07,
+ IA64_TEMPLATE_MMI = 0x08,
+ IA64_TEMPLATE_MMIS = 0x09,
+ IA64_TEMPLATE_MSMI = 0x0A,
+ IA64_TEMPLATE_MSMIS = 0x0B,
+ IA64_TEMPLATE_MFI = 0x0C,
+ IA64_TEMPLATE_MFIS = 0x0D,
+ IA64_TEMPLATE_MMF = 0x0E,
+ IA64_TEMPLATE_MMFS = 0x0F,
+ IA64_TEMPLATE_MIB = 0x10,
+ IA64_TEMPLATE_MIBS = 0x11,
+ IA64_TEMPLATE_MBB = 0x12,
+ IA64_TEMPLATE_MBBS = 0x13,
+ IA64_TEMPLATE_UNUS3 = 0x14,
+ IA64_TEMPLATE_UNUS4 = 0x15,
+ IA64_TEMPLATE_BBB = 0x16,
+ IA64_TEMPLATE_BBBS = 0x17,
+ IA64_TEMPLATE_MMB = 0x18,
+ IA64_TEMPLATE_MMBS = 0x19,
+ IA64_TEMPLATE_UNUS5 = 0x1A,
+ IA64_TEMPLATE_UNUS6 = 0x1B,
+ IA64_TEMPLATE_MFB = 0x1C,
+ IA64_TEMPLATE_MFBS = 0x1D,
+ IA64_TEMPLATE_UNUS7 = 0x1E,
+ IA64_TEMPLATE_UNUS8 = 0x1F,
+} Ia64BundleTemplate;
+
+typedef enum {
+ IA64_R0 = 0,
+ IA64_R1 = 1,
+ IA64_R2 = 2,
+ IA64_R3 = 3,
+ IA64_R4 = 4,
+ IA64_R5 = 5,
+ IA64_R6 = 6,
+ IA64_R7 = 7,
+ IA64_R8 = 8,
+ IA64_R9 = 9,
+ IA64_R10 = 10,
+ IA64_R11 = 11,
+ IA64_R12 = 12,
+ IA64_R13 = 13,
+ IA64_R14 = 14,
+ IA64_R15 = 15,
+ IA64_R16 = 16,
+ IA64_R17 = 17,
+ IA64_R18 = 18,
+ IA64_R19 = 19,
+ IA64_R20 = 20,
+ IA64_R21 = 21,
+ IA64_R22 = 22,
+ IA64_R23 = 23,
+ IA64_R24 = 24,
+ IA64_R25 = 25,
+ IA64_R26 = 26,
+ IA64_R27 = 27,
+ IA64_R28 = 28,
+ IA64_R29 = 29,
+ IA64_R30 = 30,
+ IA64_R31 = 31,
+
+ /* Aliases */
+ IA64_GP = IA64_R1,
+ IA64_SP = IA64_R12,
+ IA64_TP = IA64_R13
+} Ia64GeneralRegister;
+
+typedef enum {
+ IA64_B0 = 0,
+ IA64_B1 = 1,
+ IA64_B2 = 2,
+ IA64_B3 = 3,
+ IA64_B4 = 4,
+ IA64_B5 = 5,
+ IA64_B6 = 6,
+ IA64_B7 = 7,
+
+ /* Aliases */
+ IA64_RP = IA64_B0
+} Ia64BranchRegister;
+
+typedef enum {
+ IA64_CCV = 32,
+ IA64_PFS = 64
+} Ia64ApplicationRegister;
+
+/* disassembly */
+#define ia64_bundle_template(code) ((*(guint64*)(gpointer)code) & 0x1f)
+#define ia64_bundle_ins1(code) (((*(guint64*)(gpointer)code) >> 5) & 0x1ffffffffff)
+#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x7fffff) << 18))
+#define ia64_bundle_ins3(code) ((((guint64*)(gpointer)code)[1]) >> 23)
+
+#define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37)
+#define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f)
+#define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f)
+#define ia64_ins_r2(ins) ((((guint64)(ins)) >> 13) & 0x7f)
+#define ia64_ins_r3(ins) ((((guint64)(ins)) >> 20) & 0x7f)
+
+#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_b2(ins) ((((guint64)(ins)) >> 13) & 0x7)
+#define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1)
+#define ia64_ins_x2a(ins) ((((guint64)(ins)) >> 34) & 0x3)
+#define ia64_ins_x2b(ins) ((((guint64)(ins)) >> 27) & 0x3)
+#define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7)
+#define ia64_ins_x4(ins) ((((guint64)(ins)) >> 29) & 0xf)
+#define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f)
+#define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1)
+#define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1)
+#define ia64_ins_ve(ins) ((((guint64)(ins)) >> 33) & 0x1)
+
+#define IA64_NOP_I ((0x01 << 27))
+#define IA64_NOP_M ((0x01 << 27))
+#define IA64_NOP_B (((guint64)0x02 << 37))
+#define IA64_NOP_F ((0x01 << 27))
+#define IA64_NOP_X ((0x01 << 27))
+
+/*
+ * IA64_READ_PR_BRANCH and IA64_WRITE_PR_FLOAT make it possible to place
+ * compares and the branches which consume their predicates in the same
+ * instruction group.
+ */
+typedef enum {
+ IA64_READ_GR,
+ IA64_WRITE_GR,
+ IA64_READ_PR,
+ IA64_WRITE_PR,
+ IA64_READ_PR_BRANCH,
+ IA64_WRITE_PR_FLOAT,
+ IA64_READ_BR,
+ IA64_WRITE_BR,
+ IA64_READ_BR_BRANCH,
+ IA64_READ_FR,
+ IA64_WRITE_FR,
+ IA64_READ_AR,
+ IA64_WRITE_AR,
+ IA64_NO_STOP,
+ IA64_END_OF_INS,
+ IA64_NONE
+} Ia64Dependency;
+
+/*
+ * IA64 code cannot be emitted one instruction at a time as on other
+ * processors: instructions are packed three at a time into 128-bit bundles.
+ * This structure buffers instructions until they can be combined into a
+ * bundle.
+ */
+
+#define IA64_INS_BUFFER_SIZE 4
+#define MAX_UNW_OPS 8
+
+typedef struct {
+ guint8 *buf;
+ guint one_ins_per_bundle : 1;
+ int nins, template, dep_info_pos, unw_op_pos, unw_op_count;
+ guint64 instructions [IA64_INS_BUFFER_SIZE];
+ int itypes [IA64_INS_BUFFER_SIZE];
+ guint8 *region_start;
+ guint8 dep_info [128];
+ unw_dyn_op_t unw_ops [MAX_UNW_OPS];
+ /* The index of the instruction to which the given unw op belongs */
+ guint8 unw_ops_pos [MAX_UNW_OPS];
+} Ia64CodegenState;
+
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#else
+void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#endif
+
+#define ia64_codegen_init(code, codegen_buf) do { \
+ code.buf = codegen_buf; \
+ code.region_start = code.buf; \
+ code.nins = 0; \
+ code.one_ins_per_bundle = 0; \
+ code.dep_info_pos = 0; \
+ code.unw_op_count = 0; \
+ code.unw_op_pos = 0; \
+} while (0)
+
+#define ia64_codegen_close(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
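+
+/*
+ * Typical lifecycle (illustrative sketch; the buffer size is arbitrary):
+ * instructions are buffered in the Ia64CodegenState and flushed into bundles
+ * automatically when the buffer fills, or explicitly by the macros below.
+ *
+ *   guint8 buf [256];
+ *   Ia64CodegenState code;
+ *
+ *   ia64_codegen_init (code, buf);
+ *   ia64_add_pred (code, 0, IA64_R8, IA64_R9, IA64_R10); // r8 = r9 + r10, qualified by p0
+ *   ia64_codegen_close (code); // flush the partially filled bundle
+ */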
+
+#define ia64_begin_bundle(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
+
+#define ia64_codegen_set_one_ins_per_bundle(code, is_one) do { \
+ ia64_begin_bundle (code); \
+ code.one_ins_per_bundle = (is_one); \
+} while (0)
+
+#define ia64_begin_bundle_template(code, bundle_template) do { \
+ ia64_emit_bundle (&code, TRUE); \
+ code.template = (bundle_template); \
+} while (0)
+
+#define ia64_unw_save_reg(code, reg, dreg) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_save_reg (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, -1, reg, dreg); \
+} while (0)
+
+#define ia64_unw_add(code, reg, val) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_add (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, reg, val); \
+} while (0)
+
+#define ia64_unw_pop_frames(code, nframes) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_pop_frames (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, (nframes)); \
+} while (0)
+
+#define ia64_unw_label_state(code, id) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_label_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+
+#define ia64_unw_copy_state(code, id) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_copy_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+#if 0
+/* To ease debugging, emit instructions immediately */
+#define EMIT_BUNDLE(itype, code) if ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#else
+#define EMIT_BUNDLE(itype, code) if ((itype == IA64_INS_TYPE_LX) && (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#endif
+
+#define ia64_emit_ins(code, itype, ins) do { \
+ code.instructions [code.nins] = ins; \
+ code.itypes [code.nins] = itype; \
+ code.nins ++; \
+ code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+ EMIT_BUNDLE (itype, code); \
+ if (code.nins == IA64_INS_BUFFER_SIZE) \
+ ia64_emit_bundle (&code, FALSE); \
+} while (0)
+
+#define ia64_no_stop(code) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_NO_STOP; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+} while (0)
+
+#if G_BYTE_ORDER != G_LITTLE_ENDIAN
+#error "FIXME"
+#endif
+
+#define ia64_emit_bundle_template(code, template, i1, i2, i3) do { \
+ guint64 *buf64 = (guint64*)(gpointer)(code)->buf; \
+ guint64 dw1, dw2; \
+ dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \
+ dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \
+ buf64[0] = dw1; \
+ buf64[1] = dw2; \
+ (code)->buf += 16; \
+} while (0)
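+
+/*
+ * Resulting 128-bit bundle layout (two little-endian 64-bit words):
+ *   dw1: bits 0-4 template, bits 5-45 slot 0, bits 46-63 low 18 bits of slot 1
+ *   dw2: bits 0-22 high 23 bits of slot 1, bits 23-63 slot 2
+ */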
+
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+
+G_GNUC_UNUSED static void
+ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
+{
+ int i;
+
+ for (i = 0; i < code->nins; ++i) {
+ switch (code->itypes [i]) {
+ case IA64_INS_TYPE_A:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_I:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_M:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_B:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [i]);
+ break;
+ case IA64_INS_TYPE_F:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_LX:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [i], code->instructions [i + 1]);
+ i ++;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+
+ code->nins = 0;
+ code->dep_info_pos = 0;
+}
+
+#endif /* IA64_SIMPLE_EMIT_BUNDLE */
+
+#define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define ia64_is_imm21(imm) (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+
+#define ia64_is_adds_imm(imm) ia64_is_imm14((imm))
+
+#if 1
+
+#define check_assert(cond) g_assert((cond))
+
+#else
+
+#define check_assert(cond)
+
+#endif
+
+#define check_greg(gr) check_assert ((guint64)(gr) < 128)
+
+#define check_freg(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_fr(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_preg(pr) check_assert ((guint64)(pr) < 64)
+
+#define check_breg(br) check_assert ((guint64)(br) < 8)
+
+#define check_count2(count) check_assert (((count) >= 1) && ((count) <= 4))
+
+#define check_count5(count) check_assert (((count) >= 0) && ((count) < 32))
+
+#define check_count6(count) check_assert (((count) >= 0) && ((count) < 64))
+
+#define check_imm1(imm) check_assert (((gint64)(imm) >= -1) && ((gint64)(imm) <= 0))
+#define check_imm3(imm) check_assert (((gint64)(imm) >= -4) && ((gint64)(imm) <= 3))
+#define check_imm8(imm) check_assert (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define check_imm9(imm) check_assert (((gint64)(imm) >= -256) && ((gint64)(imm) <= 255))
+#define check_imm14(imm) check_assert (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1)))
+#define check_imm62(imm) check_assert (((gint64)(imm) >= -0x2000000000000000LL) && ((gint64)(imm) <= 0x1fffffffffffffffLL))
+
+#define check_len4(len) check_assert (((gint64)(len) >= 1) && ((gint64)(len) <= 16))
+
+#define check_bwh(bwh) check_assert ((bwh) >= 0 && (bwh) <= IA64_BWH_DPNT)
+
+#define check_ph(ph) check_assert ((ph) >= 0 && (ph) <= IA64_PH_MANY)
+
+#define check_dh(dh) check_assert ((dh) >= 0 && (dh) <= IA64_DH_CLR)
+
+#define check_sf(sf) check_assert ((sf) >= 0 && (sf) <= 3)
+
+#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0)
+
+/* Dependency info */
+#define read_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define write_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define read_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+#define write_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+#define read_pr_branch(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_pr_fp(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR_FLOAT; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br_branch(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define ia64_emit_ins_1(code,itype,f1,o1) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1))))
+
+#define ia64_emit_ins_3(code,itype,f1,o1,f2,o2,f3,o3) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3))))
+
+#define ia64_emit_ins_5(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5))))
+
+#define ia64_emit_ins_6(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6))))
+
+#define ia64_emit_ins_7(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7))))
+
+#define ia64_emit_ins_8(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8))))
+
+#define ia64_emit_ins_9(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9))))
+
+#define ia64_emit_ins_10(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10))))
+
+#define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11))))
+
+/*
+ * A-Unit Instructions
+ */
+
+#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0)
+#define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1)
+#define ia64_sub_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 1)
+#define ia64_sub1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 0)
+#define ia64_addp4_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 2, 0)
+#define ia64_and_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 0)
+#define ia64_andcm_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 1)
+#define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2)
+#define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3)
+
+#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 ((ct2d)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, ((ct2d) - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count))
+#define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count))
+
+#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0)
+
+#define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1)
+#define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0)
+#define ia64_andcm_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 1)
+#define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2)
+#define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3)
+
+#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0)
+
+#define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0)
+#define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0)
+
+#define ia64_a5(code, qp, r1, imm, r3) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0)
+
+#define ia64_addl_imm_pred(code, qp, r1, imm22, r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3))
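+
+/* Illustrative: materialize a 22-bit constant into r8. r3 must be r0-r3
+   (checked above); r0 always reads as zero:
+       ia64_addl_imm_pred (code, 0, IA64_R8, 0x12345, IA64_R0);
+*/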
+
+#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0)
+#define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0)
+#define ia64_cmp_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 0)
+#define ia64_cmp_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 1)
+#define ia64_cmp_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 1)
+#define ia64_cmp_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 1)
+#define ia64_cmp_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 0)
+#define ia64_cmp_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 1)
+
+#define ia64_cmp4_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 0)
+#define ia64_cmp4_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 0)
+#define ia64_cmp4_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 0)
+#define ia64_cmp4_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 1)
+#define ia64_cmp4_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 1)
+#define ia64_cmp4_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 1)
+#define ia64_cmp4_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 0)
+#define ia64_cmp4_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 1)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
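+
+/*
+ * The pseudo-ops above reuse the lt/ltu/eq encodings with the predicate
+ * targets and/or the source registers swapped; e.g. "cmp.ge p1, p2 = r2, r3"
+ * is emitted as "cmp.lt p2, p1 = r2, r3". Illustrative use:
+ *
+ *   ia64_cmp_ge_pred (code, 0, 6, 7, IA64_R8, IA64_R9); // p6 = (r8 >= r9), p7 = !p6
+ */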
+
+#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 0)
+#define ia64_cmp_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 1)
+#define ia64_cmp_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 1)
+#define ia64_cmp_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 1)
+#define ia64_cmp_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 0)
+#define ia64_cmp_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 1)
+
+#define ia64_cmp4_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 0)
+#define ia64_cmp4_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 1)
+#define ia64_cmp4_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 0)
+#define ia64_cmp4_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1)
+
+#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0)
+#define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0)
+#define ia64_cmp_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 0)
+#define ia64_cmp_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 1)
+#define ia64_cmp_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 1)
+#define ia64_cmp_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 1)
+#define ia64_cmp_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 0)
+#define ia64_cmp_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 0)
+#define ia64_cmp_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 0)
+#define ia64_cmp_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 1)
+#define ia64_cmp_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 1)
+#define ia64_cmp_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 1)
+
+#define ia64_cmp4_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 0)
+#define ia64_cmp4_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 0)
+#define ia64_cmp4_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 0)
+#define ia64_cmp4_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 1)
+#define ia64_cmp4_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 1)
+#define ia64_cmp4_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 1)
+#define ia64_cmp4_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 0)
+#define ia64_cmp4_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 0)
+#define ia64_cmp4_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 0)
+#define ia64_cmp4_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 1)
+#define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1)
+#define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_cmp4_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0)
+#define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 0)
+#define ia64_padd4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0)
+#define ia64_padd1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 1)
+#define ia64_padd2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 1)
+#define ia64_padd1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2)
+#define ia64_padd2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 2)
+#define ia64_padd1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 3)
+#define ia64_padd2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 3)
+
+#define ia64_psub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 0)
+#define ia64_psub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 0)
+#define ia64_psub4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 1, 0)
+#define ia64_psub1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 1)
+#define ia64_psub2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 1)
+#define ia64_psub1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 2)
+#define ia64_psub2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 2)
+#define ia64_psub1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 3)
+#define ia64_psub2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 3)
+
+#define ia64_pavg1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2)
+#define ia64_pavg2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 2)
+#define ia64_pavg1_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 3)
+#define ia64_pavg2_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 3)
+#define ia64_pavgsub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 3, 2)
+#define ia64_pavgsub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 3, 2)
+#define ia64_pcmp1_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 0)
+#define ia64_pcmp2_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 0)
+#define ia64_pcmp4_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 0)
+#define ia64_pcmp1_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 1)
+#define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1)
+#define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1)
+
+#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, (count))
+#define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, (count))
+
+#define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 1 : (((count) == 15) ? 2 : 3)))
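+/* pmpyshr shift counts are restricted to 0, 7, 15 and 16, encoded as 0-3 */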
+
+/*
+ * I-Unit Instructions
+ */
+
+#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert (((ct2d) == 0) || ((ct2d) == 7) || ((ct2d) == 15) || ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count))
+
+#define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count))
+
+#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3)
+#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3)
+#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2)
+#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2)
+#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2)
+#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2)
+#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 2)
+#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 2)
+#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0)
+#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0)
+#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0)
+#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1)
+#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1)
+#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1)
+#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1)
+#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1)
+#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1)
+#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0)
+#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1)
+#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0)
+#define ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1)
+#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2)
+
+typedef enum {
+ IA64_MUX1_BRCST = 0x0,
+ IA64_MUX1_MIX = 0x8,
+ IA64_MUX1_SHUF = 0x9,
+ IA64_MUX1_ALT = 0xa,
+ IA64_MUX1_REV = 0xb
+} Ia64Mux1Permutation;
+
+#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2)
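+
+/* Illustrative: broadcast the low byte of r9 into all eight bytes of r8:
+       ia64_mux1_pred (code, 0, IA64_R8, IA64_R9, IA64_MUX1_BRCST);
+*/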
+
+#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2)
+
+#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0)
+#define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0)
+#define ia64_shr_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 2, 0)
+#define ia64_pshr2_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 0)
+#define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0)
+#define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0)
+
+#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0)
+#define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0)
+#define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0)
+#define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0)
+
+#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1)
+#define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1)
+#define ia64_shl_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1)
+
+#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1)
+#define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1)
+
+#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2)
+
+#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0)
+
+#define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), (r3), (count), 5, 3, 0)
+
+#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0)
+#define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1)
+
+#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), (r2), (pos), (len), 1, 1, 0)
+
+#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1)
+
+#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm1 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1)
+
+#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0)
+
+#define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len))
+
+#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0)
+#define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1)
+#define ia64_tbit_z_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 0)
+#define ia64_tbit_nz_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 1)
+#define ia64_tbit_z_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 0)
+#define ia64_tbit_nz_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 1)
+#define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0)
+#define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1)
+
+#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0)
+#define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1)
+#define ia64_tnat_z_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 0)
+#define ia64_tnat_nz_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 1)
+#define ia64_tnat_z_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 0)
+#define ia64_tnat_nz_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 1)
+#define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0)
+#define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1)
+
+#define ia64_i18(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0)
+#define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1)
+
+#define ia64_i19(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0)
+
+#define ia64_i20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); check_imm21 ((imm)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_s_i_pred(code, qp,r2,disp) ia64_i20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { read_pr ((code), (qp)); check_imm8 (tag13); write_br ((code), (b1)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0)
+
+typedef enum {
+ IA64_MOV_TO_BR_WH_SPTK = 0,
+ IA64_MOV_TO_BR_WH_NONE = 1,
+ IA64_MOV_TO_BR_WH_DPTK = 2
+} Ia64MovToBrWhetherHint;
+
+typedef enum {
+ IA64_BR_IH_NONE = 0,
+ IA64_BR_IH_IMP = 1
+} Ia64BranchImportanceHint;
+
+#define ia64_mov_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh)
+#define ia64_mov_ret_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh)
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br_pred(code, qp, b1, r2) ia64_mov_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+#define ia64_mov_ret_to_br_pred(code, qp, b1, r2) ia64_mov_ret_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+
+/* End of pseudo ops */
+
+#define ia64_i22(code, qp, r1, b2, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_br ((code), (b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_br_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31)
+
+#define ia64_i23(code, qp, r2, mask, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3)
+
+#define ia64_i24(code, qp, imm, x3) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2)
+
+#define ia64_i25(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30)
+#define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33)
+
+#define ia64_i26(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+#define ia64_i27(code, qp, ar3, imm, x3, x6) do { read_pr ((code), (qp)); write_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a)
+
+#define ia64_i28(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), (r1), (ar3), 0, 0x32)
+
+#define ia64_i29(code, qp, r1, r3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10)
+#define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x11)
+#define ia64_zxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x12)
+#define ia64_sxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x14)
+#define ia64_sxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x15)
+#define ia64_sxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x16)
+#define ia64_czx1_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x18)
+#define ia64_czx2_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x19)
+#define ia64_czx1_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1C)
+#define ia64_czx2_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1D)
+
+/*
+ * M-Unit Instructions
+ */
+
+typedef enum {
+ IA64_LD_HINT_NONE = 0,
+ IA64_LD_HINT_NT1 = 1,
+ IA64_LD_HINT_NTA = 3
+} Ia64LoadHint;
+
+typedef enum {
+ IA64_ST_HINT_NONE = 0,
+ IA64_ST_HINT_NTA = 3
+} Ia64StoreHint;
+
+#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00)
+#define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ld4_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ld8_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x03)
+
+#define ia64_ld1_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x04)
+#define ia64_ld2_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ld4_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ld8_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x07)
+
+#define ia64_ld1_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x08)
+#define ia64_ld2_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ld4_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ld8_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0B)
+
+#define ia64_ld1_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0C)
+#define ia64_ld2_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ld4_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ld8_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0F)
+
+#define ia64_ld1_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x10)
+#define ia64_ld2_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x11)
+#define ia64_ld4_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x12)
+#define ia64_ld8_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x13)
+
+#define ia64_ld1_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x14)
+#define ia64_ld2_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x15)
+#define ia64_ld4_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x16)
+#define ia64_ld8_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x17)
+
+#define ia64_ld8_fill_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x1B)
+
+#define ia64_ld1_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x20)
+#define ia64_ld2_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ld4_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ld8_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x23)
+
+#define ia64_ld1_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x24)
+#define ia64_ld2_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ld4_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ld8_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x28)
+#define ia64_ld2_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x29)
+#define ia64_ld4_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2B)
+
+/* FIXME: ld16 also writes the upper 8 bytes to AR.CSD, which is not tracked here */
+#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28)
+#define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C)
+
+#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B)
+
+#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B)
+
+/* Pseudo ops */
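+
+/*
+ * The _pred macros below are convenience forms that default the hint
+ * completer to IA64_LD_HINT_NONE (0), so e.g.
+ * ia64_ld4_pred (code, qp, r1, r3) expands to
+ * ia64_ld4_hint_pred (code, qp, r1, r3, 0).
+ */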
+
+#define ia64_ld1_pred(code, qp, r1, r3) ia64_ld1_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_pred(code, qp, r1, r3) ia64_ld2_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_pred(code, qp, r1, r3) ia64_ld4_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_pred(code, qp, r1, r3) ia64_ld8_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_s_pred(code, qp, r1, r3) ia64_ld1_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_s_pred(code, qp, r1, r3) ia64_ld2_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_s_pred(code, qp, r1, r3) ia64_ld4_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_s_pred(code, qp, r1, r3) ia64_ld8_s_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_a_pred(code, qp, r1, r3) ia64_ld1_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_a_pred(code, qp, r1, r3) ia64_ld2_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_a_pred(code, qp, r1, r3) ia64_ld4_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_a_pred(code, qp, r1, r3) ia64_ld8_a_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_sa_pred(code, qp, r1, r3) ia64_ld1_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_sa_pred(code, qp, r1, r3) ia64_ld2_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_sa_pred(code, qp, r1, r3) ia64_ld4_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_sa_pred(code, qp, r1, r3) ia64_ld8_sa_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_bias_pred(code, qp, r1, r3) ia64_ld1_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_bias_pred(code, qp, r1, r3) ia64_ld2_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_bias_pred(code, qp, r1, r3) ia64_ld4_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_bias_pred(code, qp, r1, r3) ia64_ld8_bias_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_acq_pred(code, qp, r1, r3) ia64_ld1_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_acq_pred(code, qp, r1, r3) ia64_ld2_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_acq_pred(code, qp, r1, r3) ia64_ld4_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_acq_pred(code, qp, r1, r3) ia64_ld8_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld8_fill_pred(code, qp, r1, r3) ia64_ld8_fill_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_pred(code, qp, r1, r3) ia64_ld1_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_pred(code, qp, r1, r3) ia64_ld2_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_pred(code, qp, r1, r3) ia64_ld4_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_pred(code, qp, r1, r3) ia64_ld8_c_clr_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_nc_pred(code, qp, r1, r3) ia64_ld1_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_nc_pred(code, qp, r1, r3) ia64_ld2_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_nc_pred(code, qp, r1, r3) ia64_ld4_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_nc_pred(code, qp, r1, r3) ia64_ld8_c_nc_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq_pred(code, qp, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_acq_pred(code, qp, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_acq_pred(code, qp, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_acq_pred(code, qp, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld16_pred(code, qp, r1, r3) ia64_ld16_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld16_acq_pred(code, qp, r1, r3) ia64_ld16_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_inc_pred(code, qp, r1, r2, r3) ia64_ld1_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_inc_pred(code, qp, r1, r2, r3) ia64_ld2_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_inc_pred(code, qp, r1, r2, r3) ia64_ld4_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_inc_pred(code, qp, r1, r2, r3) ia64_ld8_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc_pred(code, qp, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_s_inc_pred(code, qp, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_s_inc_pred(code, qp, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_s_inc_pred(code, qp, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc_pred(code, qp, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_a_inc_pred(code, qp, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_a_inc_pred(code, qp, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_a_inc_pred(code, qp, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc_pred(code, qp, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30)
+#define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31)
+#define ia64_st4_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x32)
+#define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33)
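+
+/*
+ * Note the operand order: the address register r3 comes before the data
+ * register r2, matching the "st8 [r3] = r2" assembly form. For example,
+ * the call below emits "st8 [r9] = r8":
+ *
+ *   ia64_st8_hint_pred (code, 0, 9, 8, IA64_ST_HINT_NONE);
+ */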
+
+/* Pseudo ops */
+
+#define ia64_st8_pred(code, qp, r3, r2) ia64_st8_hint_pred ((code), (qp), (r3), (r2), 0)
+
+/* End of pseudo ops */
+
+#define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x37)
+
+#define ia64_st8_spill_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x3B)
+
+#define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30)
+#define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34)
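+
+/*
+ * Note: as with ld16 above, st16 also involves AR.CSD (it supplies the
+ * upper 8 bytes of the 16-byte store), which is not tracked here.
+ */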
+
+#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30)
+#define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31)
+#define ia64_st4_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x32)
+#define ia64_st8_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x33)
+
+#define ia64_st1_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x37)
+
+#define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B)
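+
+/*
+ * The m5 forms post-increment the address register by a signed 9-bit
+ * immediate. For example, the call below emits "st8 [r9] = r8, 8"
+ * (store, then r9 += 8):
+ *
+ *   ia64_st8_inc_imm_hint_pred (code, 0, 9, 8, 8, IA64_ST_HINT_NONE);
+ */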
+
+#define ia64_m6(code, qp, f1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03)
+#define ia64_ldf8_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ldfe_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x00)
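+
+/*
+ * The x6 values follow the opcode encoding (ldfe = 0x00, ldf8 = 0x01,
+ * ldfs = 0x02, ldfd = 0x03), which is why the definitions look out of
+ * order. For example, ia64_ldfd_hint_pred (code, 0, 6, 32, 0) emits
+ * "ldfd f6 = [r32]".
+ */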
+
+#define ia64_ldfs_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ldfd_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x07)
+#define ia64_ldf8_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ldfe_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x04)
+
+#define ia64_ldfs_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ldfd_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0B)
+#define ia64_ldf8_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ldfe_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x08)
+
+#define ia64_ldfs_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ldfd_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0F)
+#define ia64_ldf8_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ldfe_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ldfd_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x23)
+#define ia64_ldf8_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ldfe_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x20)
+
+#define ia64_ldfs_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ldfd_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x27)
+#define ia64_ldf8_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ldfe_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x24)
+
+#define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B)
+
+#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02)
+#define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03)
+#define ia64_ldf8_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x01)
+#define ia64_ldfe_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x00)
+
+#define ia64_ldfs_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x06)
+#define ia64_ldfd_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x07)
+#define ia64_ldf8_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x05)
+#define ia64_ldfe_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x04)
+
+#define ia64_ldfs_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0A)
+#define ia64_ldfd_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0B)
+#define ia64_ldf8_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x09)
+#define ia64_ldfe_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x08)
+
+#define ia64_ldfs_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0E)
+#define ia64_ldfd_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0F)
+#define ia64_ldf8_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0D)
+#define ia64_ldfe_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x22)
+#define ia64_ldfd_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x23)
+#define ia64_ldf8_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x21)
+#define ia64_ldfe_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x20)
+
+#define ia64_ldfs_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x26)
+#define ia64_ldfd_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x27)
+#define ia64_ldf8_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x25)
+#define ia64_ldfe_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x24)
+
+#define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B)
+
+#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02)
+#define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03)
+#define ia64_ldf8_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x01)
+#define ia64_ldfe_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x00)
+
+#define ia64_ldfs_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x06)
+#define ia64_ldfd_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x07)
+#define ia64_ldf8_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x05)
+#define ia64_ldfe_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x04)
+
+#define ia64_ldfs_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0A)
+#define ia64_ldfd_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0B)
+#define ia64_ldf8_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x09)
+#define ia64_ldfe_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x08)
+
+#define ia64_ldfs_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0E)
+#define ia64_ldfd_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0F)
+#define ia64_ldf8_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0D)
+#define ia64_ldfe_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0C)
+
+#define ia64_ldfs_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x22)
+#define ia64_ldfd_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x23)
+#define ia64_ldf8_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x21)
+#define ia64_ldfe_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x20)
+
+#define ia64_ldfs_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x26)
+#define ia64_ldfd_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x27)
+#define ia64_ldf8_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x25)
+#define ia64_ldfe_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x24)
+
+#define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B)
+
+/* Pseudo ops */
+
+#define ia64_ldfs_pred(code, qp, f1, r3) ia64_ldfs_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_pred(code, qp, f1, r3) ia64_ldfd_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_pred(code, qp, f1, r3) ia64_ldf8_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_pred(code, qp, f1, r3) ia64_ldfe_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_s_pred(code, qp, f1, r3) ia64_ldfs_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_s_pred(code, qp, f1, r3) ia64_ldfd_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_s_pred(code, qp, f1, r3) ia64_ldf8_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_s_pred(code, qp, f1, r3) ia64_ldfe_s_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_a_pred(code, qp, f1, r3) ia64_ldfs_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_a_pred(code, qp, f1, r3) ia64_ldfd_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_a_pred(code, qp, f1, r3) ia64_ldf8_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_a_pred(code, qp, f1, r3) ia64_ldfe_a_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_sa_pred(code, qp, f1, r3) ia64_ldfs_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_sa_pred(code, qp, f1, r3) ia64_ldfd_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_sa_pred(code, qp, f1, r3) ia64_ldf8_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_sa_pred(code, qp, f1, r3) ia64_ldfe_sa_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_clr_pred(code, qp, f1, r3) ia64_ldfs_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_clr_pred(code, qp, f1, r3) ia64_ldfd_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_clr_pred(code, qp, f1, r3) ia64_ldf8_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_clr_pred(code, qp, f1, r3) ia64_ldfe_c_clr_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_nc_pred(code, qp, f1, r3) ia64_ldfs_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_nc_pred(code, qp, f1, r3) ia64_ldfd_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_nc_pred(code, qp, f1, r3) ia64_ldf8_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_nc_pred(code, qp, f1, r3) ia64_ldfe_c_nc_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldf_fill_pred(code, qp, f1, r3) ia64_ldf_fill_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_s_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_a_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldf_fill_inc_pred(code, qp, f1, r3, r2) ia64_ldf_fill_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldf_fill_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf_fill_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32)
+#define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33)
+#define ia64_stf8_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x31)
+#define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30)
+#define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B)
+
+#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_fr ((code), (f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32)
+#define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33)
+#define ia64_stf8_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x31)
+#define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x30)
+#define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B)
+
+#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02)
+#define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03)
+#define ia64_ldfp8_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x01)
+
+#define ia64_ldfps_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x06)
+#define ia64_ldfpd_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x07)
+#define ia64_ldfp8_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x05)
+
+#define ia64_ldfps_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0A)
+#define ia64_ldfpd_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0B)
+#define ia64_ldfp8_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x09)
+
+#define ia64_ldfps_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0E)
+#define ia64_ldfpd_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0F)
+#define ia64_ldfp8_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x22)
+#define ia64_ldfpd_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x23)
+#define ia64_ldfp8_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x21)
+
+#define ia64_ldfps_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x26)
+#define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x27)
+#define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25)
+
+#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02)
+#define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03)
+#define ia64_ldfp8_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x01)
+
+#define ia64_ldfps_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x06)
+#define ia64_ldfpd_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x07)
+#define ia64_ldfp8_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x05)
+
+#define ia64_ldfps_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0A)
+#define ia64_ldfpd_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0B)
+#define ia64_ldfp8_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x09)
+
+#define ia64_ldfps_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0E)
+#define ia64_ldfpd_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0F)
+#define ia64_ldfp8_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x22)
+#define ia64_ldfpd_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x23)
+#define ia64_ldfp8_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x21)
+
+#define ia64_ldfps_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x26)
+#define ia64_ldfpd_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x27)
+#define ia64_ldfp8_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x25)
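+
+/*
+ * The ldfp forms load a pair of adjacent floating-point values into f1
+ * and f2 with a single access (there is no ldfpe variant). In the
+ * register-update (m12) forms the increment is implicit in the
+ * instruction: 8 bytes for ldfps, 16 for ldfpd and ldfp8.
+ */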
+
+typedef enum {
+ IA64_LFHINT_NONE = 0,
+ IA64_LFHINT_NT1 = 1,
+ IA64_LFHINT_NT2 = 2,
+ IA64_LFHINT_NTA = 3
+} Ia64LinePrefetchHint;
+
+#define ia64_m13(code, qp, r3, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C)
+#define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D)
+#define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E)
+#define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F)
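+
+/*
+ * For example, ia64_lfetch_hint_pred (code, 0, 32, IA64_LFHINT_NTA)
+ * emits "lfetch.nta [r32]", prefetching the line as non-temporal at
+ * all cache levels.
+ */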
+
+#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C)
+#define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D)
+#define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E)
+#define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F)
+
+#define ia64_m15(code, qp, r3, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C)
+#define ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D)
+#define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E)
+#define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F)
+
+#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00)
+#define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01)
+#define ia64_cmpxchg4_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x02)
+#define ia64_cmpxchg8_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x03)
+#define ia64_cmpxchg1_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x04)
+#define ia64_cmpxchg2_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x05)
+#define ia64_cmpxchg4_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x06)
+#define ia64_cmpxchg8_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x07)
+#define ia64_cmpxchg16_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x20)
+#define ia64_cmpxchg16_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x24)
+#define ia64_xchg1_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x08)
+#define ia64_xchg2_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x09)
+#define ia64_xchg4_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0A)
+#define ia64_xchg8_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0B)
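+
+/*
+ * Architecturally, cmpxchg compares the memory value against AR.CCV
+ * (which must be set up beforehand) and stores r2 on a match; that
+ * AR.CCV read is not tracked here. For example, the call below emits
+ * "cmpxchg8.acq r8 = [r9], r10, ar.ccv":
+ *
+ *   ia64_cmpxchg8_acq_hint_pred (code, 0, 8, 9, 10, 0);
+ */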
+
+#define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3)))
+
+#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm; read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12)
+#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x13)
+#define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16)
+#define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17)
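+
+/*
+ * fetchadd only accepts increments of +/-1, +/-4, +/-8 and +/-16
+ * (enforced by the check_assert above): encode_inc3 packs the magnitude
+ * and the sign bit is emitted separately. For example, the call below
+ * emits "fetchadd4.acq r8 = [r9], -1":
+ *
+ *   ia64_fetchadd4_acq_hint_pred (code, 0, 8, 9, -1, 0);
+ */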
+
+#define ia64_m18(code, qp, f1, r2, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_fr ((code), (f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C)
+#define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D)
+#define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E)
+#define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F)
+
+#define ia64_m19(code, qp, r1, f2, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_getf_sig_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C)
+#define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D)
+#define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E)
+#define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F)
+
+#define ia64_m20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_m_pred(code, qp, r2, disp) ia64_m20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_m21(code, qp, f2, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_float_m_pred(code, qp, f2, disp) ia64_m21 ((code), (qp), (f2), (disp), 3)
+
+#define ia64_m22(code, qp, r1, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_pred(code, qp, r1, disp) ia64_m22 ((code), (qp), (r1), (disp), 4)
+#define ia64_chk_a_clr_pred(code, qp, r1, disp) ia64_m22 ((code), (qp), (r1), (disp), 5)
+
+#define ia64_m23(code, qp, f1, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_float_pred(code, qp, f1, disp) ia64_m23 ((code), (qp), (f1), (disp), 6)
+#define ia64_chk_a_clr_float_pred(code, qp, f1, disp) ia64_m23 ((code), (qp), (f1), (disp), 7)
+
+#define ia64_m24(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1)
+#define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2)
+#define ia64_mf_pred(code, qp) ia64_m24 ((code), (qp), 0, 2, 2)
+#define ia64_mf_a_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 2)
+#define ia64_srlz_d_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 3)
+#define ia64_srlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3)
+#define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3)
+
+#define ia64_m25(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_flushrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xC, 0)
+#define ia64_loadrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xA, 0)
+
+#define ia64_m26(code, qp, r1, x3, x4, x2) do { read_pr ((code), (qp)); read_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1)
+
+#define ia64_m27(code, qp, f1, x3, x4, x2) do { read_pr ((code), (qp)); read_fr ((code), (f1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_e_float_pred(code, qp, f1) ia64_m27 ((code), (qp), (f1), 0, 3, 1)
+
+#define ia64_m28(code, qp, r3, x3, x6, x) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0)
+
+#define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0)
+#define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1)
+
+#define ia64_m29(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2)
+
+#define ia64_m31(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22)
+
+#define ia64_m32(code, qp, cr3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), (cr3), (r2), 0, 0x2C)
+
+#define ia64_m33(code, qp, r1, cr3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24)
+
+#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { read_pr ((code), (qp)); check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0)
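+
+/*
+ * Usage sketch (editor's illustration): ia64_alloc_pred takes input, local
+ * and output register counts plus the rotating-region size and packs them
+ * into the sof/sol/sor fields of M34. A frame with 2 inputs, 3 locals,
+ * 2 outputs and no rotating registers, saving ar.pfs into r34:
+ *
+ *   ia64_alloc_pred (code, 0, 34, 2, 3, 2, 0);  // alloc r34 = ar.pfs, 2, 3, 2, 0
+ */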
+
+#define ia64_m35(code, qp, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D)
+#define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29)
+
+#define ia64_m36(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25)
+#define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21)
+
+#define ia64_m37(code, qp, imm, x3, x2, x4) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0)
+
+/* The System/Memory Management instruction encodings (M38-M47) are missing */
+
+#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0)
+#define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1)
+
+typedef enum {
+ IA64_BWH_SPTK = 0,
+ IA64_BWH_SPNT = 1,
+ IA64_BWH_DPTK = 2,
+ IA64_BWH_DPNT = 3
+} Ia64BranchWhetherHint;
+
+typedef enum {
+ IA64_PH_FEW = 0,
+ IA64_PH_MANY = 1
+} Ia64SeqPrefetchHint;
+
+typedef enum {
+ IA64_DH_NONE = 0,
+ IA64_DH_CLR = 1
+} Ia64BranchCacheDeallocHint;
+
+#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { read_pr_branch ((code), (qp)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+#define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2)
+#define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3)
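+
+/*
+ * Usage sketch (editor's illustration): the bwh/ph/dh arguments take the
+ * enum values above. A branch statically predicted taken, prefetching few
+ * lines and keeping its cache entry (`disp` stands for the bundle
+ * displacement, typically patched in later):
+ *
+ *   ia64_br_cond_hint_pred (code, 0, disp, IA64_BWH_SPTK, IA64_PH_FEW, IA64_DH_NONE);
+ */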
+
+#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5)
+#define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6)
+#define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7)
+
+#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0)
+
+#define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0)
+#define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1)
+#define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4)
+
+#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh))
+
+/* Pseudo ops */
+
+#define ia64_br_cond_pred(code, qp, disp) ia64_br_cond_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wexit_pred(code, qp, disp) ia64_br_wexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wtop_pred(code, qp, disp) ia64_br_wtop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_cloop_pred(code, qp, disp) ia64_br_cloop_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_cexit_pred(code, qp, disp) ia64_br_cexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_ctop_pred(code, qp, disp) ia64_br_ctop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_call_pred(code, qp, b1, disp) ia64_br_call_hint_pred (code, qp, b1, disp, 0, 0, 0)
+
+#define ia64_br_cond_reg_pred(code, qp, b1) ia64_br_cond_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ia_reg_pred(code, qp, b1) ia64_br_ia_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ret_reg_pred(code, qp, b1) ia64_br_ret_reg_hint_pred (code, qp, b1, 0, 0, 0)
+
+#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred (code, qp, b1, b2, 0, 0, 0)
+
+/* End of pseudo ops */
+
+typedef enum {
+ IA64_IPWH_SPTK = 0,
+ IA64_IPWH_LOOP = 1,
+ IA64_IPWH_DPTK = 2,
+ IA64_IPWH_EXIT = 3
+} Ia64IPRelativeBranchWhetherHint;
+
+/* B6 and B7 are missing */
+
+#define ia64_b8(code, qp, x6) do { read_pr ((code), (qp)); ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0)
+
+#define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02)
+#define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04)
+#define ia64_clrrrb_pr_pred(code, qp) ia64_b8 ((code), (qp), 0x05)
+#define ia64_rfi_pred(code, qp) ia64_b8 ((code), (qp), 0x08)
+#define ia64_bsw_0_pred(code, qp) ia64_b8 ((code), (qp), 0x0C)
+#define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D)
+#define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10)
+
+#define ia64_b9(code, qp, imm, opcode, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00)
+#define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00)
+#define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01)
+
+/*
+ * F-Unit Instructions
+ */
+
+#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); read_fr ((code), (f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 0)
+#define ia64_fma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 1)
+#define ia64_fma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 0)
+#define ia64_fpma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 1)
+#define ia64_fms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 0)
+#define ia64_fms_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 1)
+#define ia64_fms_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 0)
+#define ia64_fpms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 1)
+#define ia64_fnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 0)
+#define ia64_fnma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 1)
+#define ia64_fnma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 0)
+#define ia64_fpnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 1)
+
+/* Pseudo ops */
+#define ia64_fnorm_s_sf_pred(code, qp, f1, f3, sf) ia64_fma_s_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
+#define ia64_fnorm_d_sf_pred(code, qp, f1, f3, sf) ia64_fma_d_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
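+/*
+ * (Editor's note) fnorm f1 = f3 expands to fma f1 = f3, f1, f0: FP registers
+ * fr1 and fr0 are architecturally fixed to 1.0 and 0.0, so the multiply-add
+ * reduces to a normalizing copy of f3.
+ */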
+
+#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0)
+#define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3)
+#define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2)
+
+/* Pseudo ops */
+#define ia64_xmpy_l_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_lu_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_h_pred(code, qp, f1, f3, f4) ia64_xma_h_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_hu_pred(code, qp, f1, f3, f4) ia64_xma_hu_pred ((code), (qp), (f1), (f3), (f4), 0)
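+/*
+ * (Editor's note) xmpy is xma with the addend fixed to fr0 (0.0), giving a
+ * plain 64x64 integer multiply in the FP register file, e.g. (arbitrary
+ * registers):
+ *
+ *   ia64_xmpy_l_pred (code, 0, 6, 7, 8);  // f6 = low 64 bits of f7 * f8
+ */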
+
+#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0)
+
+#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { read_pr ((code), (qp)); read_fr ((code), (f2)); read_fr ((code), (f3)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0)
+
+#define ia64_fcmp_eq_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 0)
+#define ia64_fcmp_lt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 0)
+#define ia64_fcmp_le_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 0)
+#define ia64_fcmp_unord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 0)
+#define ia64_fcmp_eq_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 1)
+#define ia64_fcmp_lt_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 1)
+#define ia64_fcmp_le_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 1)
+#define ia64_fcmp_unord_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 1)
+
+/* Pseudo ops */
+#define ia64_fcmp_gt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ne_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nlt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nle_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_ngt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+
+#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { read_pr ((code), (qp)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0)
+
+#define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0)
+#define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1)
+
+#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 0, 1, 0)
+#define ia64_fprcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 1, 1, 0)
+
+#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 0, 1, 1)
+#define ia64_fprsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 1, 1, 1)
+
+#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x14)
+#define ia64_fmax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x15)
+#define ia64_famin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x16)
+#define ia64_famax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x17)
+#define ia64_fpmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x14)
+#define ia64_fpmax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x15)
+#define ia64_fpamin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x16)
+#define ia64_fpamax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x17)
+#define ia64_fpcmp_eq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x30)
+#define ia64_fpcmp_lt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x31)
+#define ia64_fpcmp_le_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x32)
+#define ia64_fpcmp_unord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x33)
+#define ia64_fpcmp_neq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x34)
+#define ia64_fpcmp_nlt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x35)
+#define ia64_fpcmp_nle_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x36)
+#define ia64_fpcmp_ord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x37)
+
+#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0)
+
+#define ia64_fmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10)
+#define ia64_fmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11)
+#define ia64_fmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12)
+#define ia64_fmix_lr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x39)
+#define ia64_fmix_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3A)
+#define ia64_fmix_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3B)
+#define ia64_fsxt_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3C)
+#define ia64_fsxt_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3D)
+#define ia64_fpack_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x28)
+#define ia64_fswap_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x34)
+#define ia64_fswap_nl_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x35)
+#define ia64_fswap_nr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x36)
+#define ia64_fand_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2C)
+#define ia64_fandcm_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2D)
+#define ia64_for_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2E)
+#define ia64_fxor_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2F)
+#define ia64_fpmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x10)
+#define ia64_fpmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x11)
+#define ia64_fpmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 1, 0, 0x12)
+
+/* Pseudo ops */
+#define ia64_fmov_pred(code, qp, f1, f3) ia64_fmerge_s_pred ((code), (qp), (f1), (f3), (f3))
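+/*
+ * (Editor's note) fmov f1 = f3 is fmerge.s with both sources equal: merging
+ * the sign of f3 with the exponent and significand of f3 is a plain copy.
+ */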
+
+#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18)
+#define ia64_fcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x19)
+#define ia64_fcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1A)
+#define ia64_fcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1B)
+#define ia64_fpcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x18)
+#define ia64_fpcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x19)
+#define ia64_fpcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1A)
+#define ia64_fpcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1B)
+
+#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_xf_pred(code, qp, f1, f2) ia64_f11 ((code), (qp), (f1), (f2), 0, 0, 0x1C)
+
+#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fsetc_sf_pred(code, qp, amask, omask, sf) ia64_f12 ((code), (qp), (amask), (omask), (sf), 0, 0, 0x04)
+
+#define ia64_f13(code, qp, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fclrf_sf_pred(code, qp, sf) ia64_f13 ((code), (qp), (sf), 0, 0, 0x05)
+
+#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_fchkf_sf_pred(code, qp, disp, sf) ia64_f14 ((code), (qp), (disp), (sf), 0, 0, 0x8)
+
+#define ia64_f15(code, qp, imm, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_f_pred(code, qp, imm) ia64_f15 ((code), (qp), (imm), 0, 0, 0x0)
+
+/*
+ * X-Unit Instructions
+ */
+
+#define ia64_x1(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00)
+
+#define ia64_x2(code, qp, r1, imm, vc) do { if ((code).nins > IA64_INS_BUFFER_SIZE - 2) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0)
+
+#define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0)
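+
+/*
+ * Usage sketch (editor's illustration): movl is the only way to materialize
+ * a full 64-bit immediate; ia64_x2 reserves both the L and X slots of an
+ * MLX bundle before emitting. With an arbitrary target register:
+ *
+ *   ia64_movl_pred (code, 0, 2, 0x123456789abcdef0ULL);  // movl r2 = imm64
+ */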
+
+#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0)
+
+#define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+
+#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0)
+
+#define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+#define ia64_x5(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0)
+#define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1)
+
+
+/*
+ * Non-predicated instruction variants
+ */
+
+
+#define ia64_add(code, r1, r2, r3) ia64_add_pred ((code), 0, r1, r2, r3)
+#define ia64_add1(code, r1, r2, r3) ia64_add1_pred ((code), 0, r1, r2, r3)
+#define ia64_sub(code, r1, r2, r3) ia64_sub_pred ((code), 0, r1, r2, r3)
+#define ia64_sub1(code, r1, r2, r3) ia64_sub1_pred ((code), 0, r1, r2, r3)
+#define ia64_addp4(code, r1, r2, r3) ia64_addp4_pred ((code), 0, r1, r2, r3)
+#define ia64_and(code, r1, r2, r3) ia64_and_pred ((code), 0, r1, r2, r3)
+#define ia64_andcm(code, r1, r2, r3) ia64_andcm_pred ((code), 0, r1, r2, r3)
+#define ia64_or(code, r1, r2, r3) ia64_or_pred ((code), 0, r1, r2, r3)
+#define ia64_xor(code, r1, r2, r3) ia64_xor_pred ((code), 0, r1, r2, r3)
+
+
+#define ia64_shladd(code, r1, r2, r3, count) ia64_shladd_pred ((code), 0, r1, r2, r3, count)
+#define ia64_shladdp4(code, r1, r2, r3, count) ia64_shladdp4_pred ((code), 0, r1, r2, r3, count)
+
+
+#define ia64_sub_imm(code, r1,imm8,r3) ia64_sub_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_and_imm(code, r1,imm8,r3) ia64_and_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_andcm_imm(code, r1,imm8,r3) ia64_andcm_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_or_imm(code, r1,imm8,r3) ia64_or_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_xor_imm(code, r1,imm8,r3) ia64_xor_imm_pred ((code), 0, r1,imm8,r3)
+
+
+#define ia64_adds_imm(code, r1,imm14,r3) ia64_adds_imm_pred ((code), 0, r1,imm14,r3)
+#define ia64_addp4_imm(code, r1,imm14,r3) ia64_addp4_imm_pred ((code), 0, r1,imm14,r3)
+
+
+#define ia64_addl_imm(code, r1,imm22,r3) ia64_addl_imm_pred ((code), 0, r1,imm22,r3)
+
+
+#define ia64_cmp_lt(code, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu(code, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq(code, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_unc(code, p1, p2, r2, r3) ia64_cmp_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu_unc(code, p1, p2, r2, r3) ia64_cmp_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_unc(code, p1, p2, r2, r3) ia64_cmp_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_and(code, p1, p2, r2, r3) ia64_cmp_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or(code, p1, p2, r2, r3) ia64_cmp_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_and(code, p1, p2, r2, r3) ia64_cmp_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or(code, p1, p2, r2, r3) ia64_cmp_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_lt(code, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu(code, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq(code, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_unc(code, p1, p2, r2, r3) ia64_cmp4_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu_unc(code, p1, p2, r2, r3) ia64_cmp4_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_unc(code, p1, p2, r2, r3) ia64_cmp4_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_and(code, p1, p2, r2, r3) ia64_cmp4_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or(code, p1, p2, r2, r3) ia64_cmp4_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_and(code, p1, p2, r2, r3) ia64_cmp4_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or(code, p1, p2, r2, r3) ia64_cmp4_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+/* Pseudo ops */
+#define ia64_cmp_ne(code, p1, p2, r2, r3) ia64_cmp_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le(code, p1, p2, r2, r3) ia64_cmp_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt(code, p1, p2, r2, r3) ia64_cmp_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge(code, p1, p2, r2, r3) ia64_cmp_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_leu(code, p1, p2, r2, r3) ia64_cmp_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gtu(code, p1, p2, r2, r3) ia64_cmp_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_geu(code, p1, p2, r2, r3) ia64_cmp_geu_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_ne(code, p1, p2, r2, r3) ia64_cmp4_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le(code, p1, p2, r2, r3) ia64_cmp4_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt(code, p1, p2, r2, r3) ia64_cmp4_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge(code, p1, p2, r2, r3) ia64_cmp4_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_leu(code, p1, p2, r2, r3) ia64_cmp4_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gtu(code, p1, p2, r2, r3) ia64_cmp4_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_geu(code, p1, p2, r2, r3) ia64_cmp4_geu_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp_gt_and(code, p1, p2, r2, r3) ia64_cmp_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or(code, p1, p2, r2, r3) ia64_cmp_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_and(code, p1, p2, r2, r3) ia64_cmp_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or(code, p1, p2, r2, r3) ia64_cmp_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_and(code, p1, p2, r2, r3) ia64_cmp_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or(code, p1, p2, r2, r3) ia64_cmp_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_and(code, p1, p2, r2, r3) ia64_cmp_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or(code, p1, p2, r2, r3) ia64_cmp_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_gt_and(code, p1, p2, r2, r3) ia64_cmp4_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or(code, p1, p2, r2, r3) ia64_cmp4_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_and(code, p1, p2, r2, r3) ia64_cmp4_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or(code, p1, p2, r2, r3) ia64_cmp4_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_and(code, p1, p2, r2, r3) ia64_cmp4_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or(code, p1, p2, r2, r3) ia64_cmp4_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_and(code, p1, p2, r2, r3) ia64_cmp4_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or(code, p1, p2, r2, r3) ia64_cmp4_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+
+#define ia64_cmp_lt_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_lt_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_le_imm(code, p1, p2, imm8, r3) ia64_cmp_le_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gt_imm(code, p1, p2, imm8, r3) ia64_cmp_gt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ge_imm(code, p1, p2, imm8, r3) ia64_cmp_ge_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_leu_imm(code, p1, p2, imm8, r3) ia64_cmp_leu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp_gtu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_geu_imm(code, p1, p2, imm8, r3) ia64_cmp_geu_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_ne_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_le_imm(code, p1, p2, imm8, r3) ia64_cmp4_le_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gt_imm(code, p1, p2, imm8, r3) ia64_cmp4_gt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ge_imm(code, p1, p2, imm8, r3) ia64_cmp4_ge_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_leu_imm(code, p1, p2, imm8, r3) ia64_cmp4_leu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp4_gtu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_geu_imm(code, p1, p2, imm8, r3) ia64_cmp4_geu_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+#define ia64_padd1(code, r1,r2,r3) ia64_padd1_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2(code, r1,r2,r3) ia64_padd2_pred ((code), 0, r1,r2,r3)
+#define ia64_padd4(code, r1,r2,r3) ia64_padd4_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_sss(code, r1,r2,r3) ia64_padd1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_sss(code, r1,r2,r3) ia64_padd2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uuu(code, r1,r2,r3) ia64_padd1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uuu(code, r1,r2,r3) ia64_padd2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uus(code, r1,r2,r3) ia64_padd1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uus(code, r1,r2,r3) ia64_padd2_uus_pred ((code), 0, r1,r2,r3)
+
+#define ia64_psub1(code, r1,r2,r3) ia64_psub1_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2(code, r1,r2,r3) ia64_psub2_pred ((code), 0, r1,r2,r3)
+#define ia64_psub4(code, r1,r2,r3) ia64_psub4_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_sss(code, r1,r2,r3) ia64_psub1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_sss(code, r1,r2,r3) ia64_psub2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uuu(code, r1,r2,r3) ia64_psub1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uuu(code, r1,r2,r3) ia64_psub2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uus(code, r1,r2,r3) ia64_psub1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uus(code, r1,r2,r3) ia64_psub2_uus_pred ((code), 0, r1,r2,r3)
+
+#define ia64_pavg1(code, r1,r2,r3) ia64_pavg1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2(code, r1,r2,r3) ia64_pavg2_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg1_raz(code, r1,r2,r3) ia64_pavg1_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2_raz(code, r1,r2,r3) ia64_pavg2_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub1(code, r1,r2,r3) ia64_pavgsub1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub2(code, r1,r2,r3) ia64_pavgsub2_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_eq(code, r1,r2,r3) ia64_pcmp1_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_eq(code, r1,r2,r3) ia64_pcmp2_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_eq(code, r1,r2,r3) ia64_pcmp4_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_gt(code, r1,r2,r3) ia64_pcmp1_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_gt(code, r1,r2,r3) ia64_pcmp2_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_gt(code, r1,r2,r3) ia64_pcmp4_gt_pred ((code), 0, r1,r2,r3)
+
+
+#define ia64_pshladd2(code, r1, r2, r3, count) ia64_pshladd2_pred ((code), 0, r1, r2, r3, count)
+#define ia64_pshradd2(code, r1, r2, r3, count) ia64_pshradd2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2(code, r1, r2, r3, count) ia64_pmpyshr2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2_u(code, r1, r2, r3, count) ia64_pmpyshr2_u_pred ((code), 0, r1, r2, r3, count)
+
+
+#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3)
+#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3)
+
+#define ia64_mux1(code, r1, r2, mbtype) ia64_mux1_pred ((code), 0, r1, r2, mbtype)
+
+
+#define ia64_mux2(code, r1, r2, mhtype) ia64_mux2_pred ((code), 0, r1, r2, mhtype)
+
+
+#define ia64_pshr2(code, r1, r3, r2) ia64_pshr2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4(code, r1, r3, r2) ia64_pshr4_pred ((code), 0, r1, r3, r2)
+#define ia64_shr(code, r1, r3, r2) ia64_shr_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr2_u(code, r1, r3, r2) ia64_pshr2_u_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4_u(code, r1, r3, r2) ia64_pshr4_u_pred ((code), 0, r1, r3, r2)
+#define ia64_shr_u(code, r1, r3, r2) ia64_shr_u_pred ((code), 0, r1, r3, r2)
+
+
+#define ia64_pshr2_imm(code, r1, r3, count) ia64_pshr2_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_imm(code, r1, r3, count) ia64_pshr4_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr2_u_imm(code, r1, r3, count) ia64_pshr2_u_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_u_imm(code, r1, r3, count) ia64_pshr4_u_imm_pred ((code), 0, r1, r3, count)
+
+
+#define ia64_pshl2(code, r1, r3, r2) ia64_pshl2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshl4(code, r1, r3, r2) ia64_pshl4_pred ((code), 0, r1, r3, r2)
+#define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2)
+
+#define ia64_shl_imm(code, r1, r3, count) ia64_dep_z ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_imm(code, r1, r3, count) ia64_extr ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_u_imm(code, r1, r3, count) ia64_extr_u ((code), (r1), (r3), (count), 64 - (count))
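+
+/*
+ * (Editor's note) these pseudo ops use the architected expansions:
+ * shl r1 = r3, n  ->  dep.z r1 = r3, n, 64-n
+ * shr r1 = r3, n  ->  extr r1 = r3, n, 64-n
+ * e.g. ia64_shl_imm (code, 8, 9, 5) emits dep.z r8 = r9, 5, 59.
+ */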
+
+#define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count)
+#define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count)
+
+
+#define ia64_popcnt(code, r1, r3) ia64_popcnt_pred ((code), 0, r1, r3)
+
+
+#define ia64_shrp(code, r1, r2, r3, count) ia64_shrp_pred ((code), 0, r1, r2, r3, count)
+
+
+#define ia64_extr_u(code, r1, r3, pos, len) ia64_extr_u_pred ((code), 0, r1, r3, pos, len)
+#define ia64_extr(code, r1, r3, pos, len) ia64_extr_pred ((code), 0, r1, r3, pos, len)
+
+
+#define ia64_dep_z(code, r1, r2, pos, len) ia64_dep_z_pred ((code), 0, r1, r2, pos, len)
+
+
+#define ia64_dep_z_imm(code, r1, imm, pos, len) ia64_dep_z_imm_pred ((code), 0, r1, imm, pos, len)
+
+
+#define ia64_dep_imm(code, r1, imm, r3, pos, len) ia64_dep_imm_pred ((code), 0, r1, imm, r3, pos, len)
+
+
+#define ia64_dep(code, r1, r2, r3, pos, len) ia64_dep_pred ((code), 0, r1, r2, r3, pos, len)
+
+
+#define ia64_tbit_z(code, p1, p2, r3, pos) ia64_tbit_z_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_unc(code, p1, p2, r3, pos) ia64_tbit_z_unc_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_and(code, p1, p2, r3, pos) ia64_tbit_z_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_and(code, p1, p2, r3, pos) ia64_tbit_nz_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or(code, p1, p2, r3, pos) ia64_tbit_z_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or(code, p1, p2, r3, pos) ia64_tbit_nz_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or_andcm(code, p1, p2, r3, pos) ia64_tbit_z_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or_andcm(code, p1, p2, r3, pos) ia64_tbit_nz_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+
+
+#define ia64_tnat_z(code, p1, p2, r3) ia64_tnat_z_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_unc(code, p1, p2, r3) ia64_tnat_z_unc_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_and(code, p1, p2, r3) ia64_tnat_z_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_and(code, p1, p2, r3) ia64_tnat_nz_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or(code, p1, p2, r3) ia64_tnat_z_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or(code, p1, p2, r3) ia64_tnat_nz_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or_andcm(code, p1, p2, r3) ia64_tnat_z_or_andcm_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or_andcm(code, p1, p2, r3) ia64_tnat_nz_or_andcm_pred ((code), 0, p1, p2, r3)
+
+
+#define ia64_nop_i(code, imm) ia64_nop_i_pred ((code), 0, imm)
+#define ia64_hint_i(code, imm) ia64_hint_i_pred ((code), 0, imm)
+
+
+#define ia64_break_i(code, imm) ia64_break_i_pred ((code), 0, imm)
+
+
+#define ia64_chk_s_i(code, r2, disp) ia64_chk_s_i_pred ((code), 0, r2, disp)
+
+#define ia64_mov_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+#define ia64_mov_ret_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br(code, b1, r2) ia64_mov_to_br_pred ((code), 0, (b1), (r2))
+#define ia64_mov_ret_to_br(code, b1, r2) ia64_mov_ret_to_br_pred ((code), 0, (b1), (r2))
+
+/* End of pseudo ops */
+
+#define ia64_mov_from_br(code, r1, b2) ia64_mov_from_br_pred ((code), 0, r1, b2)
+
+
+#define ia64_mov_to_pred(code, r2, mask) ia64_mov_to_pred_pred ((code), 0, r2, mask)
+
+
+#define ia64_mov_to_pred_rot_imm(code, imm) ia64_mov_to_pred_rot_imm_pred ((code), 0, imm)
+
+
+#define ia64_mov_from_ip(code, r1) ia64_mov_from_ip_pred ((code), 0, r1)
+#define ia64_mov_from_pred(code, r1) ia64_mov_from_pred_pred ((code), 0, r1)
+
+
+#define ia64_mov_to_ar_i(code, ar3, r2) ia64_mov_to_ar_i_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_i(code, ar3, imm) ia64_mov_to_ar_imm_i_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_i(code, r1, ar3) ia64_mov_from_ar_i_pred ((code), 0, r1, ar3)
+
+
+#define ia64_zxt1(code, r1, r3) ia64_zxt1_pred ((code), 0, r1, r3)
+#define ia64_zxt2(code, r1, r3) ia64_zxt2_pred ((code), 0, r1, r3)
+#define ia64_zxt4(code, r1, r3) ia64_zxt4_pred ((code), 0, r1, r3)
+#define ia64_sxt1(code, r1, r3) ia64_sxt1_pred ((code), 0, r1, r3)
+#define ia64_sxt2(code, r1, r3) ia64_sxt2_pred ((code), 0, r1, r3)
+#define ia64_sxt4(code, r1, r3) ia64_sxt4_pred ((code), 0, r1, r3)
+#define ia64_czx1_l(code, r1, r3) ia64_czx1_l_pred ((code), 0, r1, r3)
+#define ia64_czx2_l(code, r1, r3) ia64_czx2_l_pred ((code), 0, r1, r3)
+#define ia64_czx1_r(code, r1, r3) ia64_czx1_r_pred ((code), 0, r1, r3)
+#define ia64_czx2_r(code, r1, r3) ia64_czx2_r_pred ((code), 0, r1, r3)
+
+#define ia64_ld1_hint(code, r1, r3, hint) ia64_ld1_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_hint(code, r1, r3, hint) ia64_ld2_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_hint(code, r1, r3, hint) ia64_ld4_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_hint(code, r1, r3, hint) ia64_ld8_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_s_hint(code, r1, r3, hint) ia64_ld1_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_s_hint(code, r1, r3, hint) ia64_ld2_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_s_hint(code, r1, r3, hint) ia64_ld4_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_s_hint(code, r1, r3, hint) ia64_ld8_s_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_a_hint(code, r1, r3, hint) ia64_ld1_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_a_hint(code, r1, r3, hint) ia64_ld2_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_a_hint(code, r1, r3, hint) ia64_ld4_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_a_hint(code, r1, r3, hint) ia64_ld8_a_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_sa_hint(code, r1, r3, hint) ia64_ld1_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_sa_hint(code, r1, r3, hint) ia64_ld2_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_sa_hint(code, r1, r3, hint) ia64_ld4_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_sa_hint(code, r1, r3, hint) ia64_ld8_sa_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_bias_hint(code, r1, r3, hint) ia64_ld1_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_bias_hint(code, r1, r3, hint) ia64_ld2_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_bias_hint(code, r1, r3, hint) ia64_ld4_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_bias_hint(code, r1, r3, hint) ia64_ld8_bias_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_acq_hint(code, r1, r3, hint) ia64_ld1_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_acq_hint(code, r1, r3, hint) ia64_ld2_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_acq_hint(code, r1, r3, hint) ia64_ld4_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_acq_hint(code, r1, r3, hint) ia64_ld8_acq_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld8_fill_hint(code, r1, r3, hint) ia64_ld8_fill_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_hint(code, r1, r3, hint) ia64_ld1_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_hint(code, r1, r3, hint) ia64_ld2_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_hint(code, r1, r3, hint) ia64_ld4_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_hint(code, r1, r3, hint) ia64_ld8_c_clr_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_nc_hint(code, r1, r3, hint) ia64_ld1_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_nc_hint(code, r1, r3, hint) ia64_ld2_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_nc_hint(code, r1, r3, hint) ia64_ld4_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_nc_hint(code, r1, r3, hint) ia64_ld8_c_nc_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_acq_hint(code, r1, r3, hint) ia64_ld1_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_acq_hint(code, r1, r3, hint) ia64_ld2_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_acq_hint(code, r1, r3, hint) ia64_ld4_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_acq_hint(code, r1, r3, hint) ia64_ld8_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld16_hint(code, r1, r3, hint) ia64_ld16_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld16_acq_hint(code, r1, r3, hint) ia64_ld16_acq_hint_pred ((code), 0, r1, r3, hint)
+
+
+#define ia64_ld1_inc_hint(code, r1, r2, r3, hint) ia64_ld1_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_inc_hint(code, r1, r2, r3, hint) ia64_ld2_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_inc_hint(code, r1, r2, r3, hint) ia64_ld4_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_inc_hint(code, r1, r2, r3, hint) ia64_ld8_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_s_inc_hint(code, r1, r2, r3, hint) ia64_ld1_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_s_inc_hint(code, r1, r2, r3, hint) ia64_ld2_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_s_inc_hint(code, r1, r2, r3, hint) ia64_ld4_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_s_inc_hint(code, r1, r2, r3, hint) ia64_ld8_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_a_inc_hint(code, r1, r2, r3, hint) ia64_ld1_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_a_inc_hint(code, r1, r2, r3, hint) ia64_ld2_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_a_inc_hint(code, r1, r2, r3, hint) ia64_ld4_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_a_inc_hint(code, r1, r2, r3, hint) ia64_ld8_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld1_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld2_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld4_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld8_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld1_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld2_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld4_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld8_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld8_fill_inc_hint(code, r1, r2, r3, hint) ia64_ld8_fill_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+
+#define ia64_ld1_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld8_fill_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_fill_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+/* Pseudo ops */
+
+#define ia64_ld1(code, r1, r3) ia64_ld1_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2(code, r1, r3) ia64_ld2_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4(code, r1, r3) ia64_ld4_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8(code, r1, r3) ia64_ld8_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_s(code, r1, r3) ia64_ld1_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_s(code, r1, r3) ia64_ld2_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_s(code, r1, r3) ia64_ld4_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_s(code, r1, r3) ia64_ld8_s_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_a(code, r1, r3) ia64_ld1_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_a(code, r1, r3) ia64_ld2_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_a(code, r1, r3) ia64_ld4_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_a(code, r1, r3) ia64_ld8_a_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_sa(code, r1, r3) ia64_ld1_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_sa(code, r1, r3) ia64_ld2_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_sa(code, r1, r3) ia64_ld4_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_sa(code, r1, r3) ia64_ld8_sa_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_bias(code, r1, r3) ia64_ld1_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_bias(code, r1, r3) ia64_ld2_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_bias(code, r1, r3) ia64_ld4_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_bias(code, r1, r3) ia64_ld8_bias_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_acq(code, r1, r3) ia64_ld1_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_acq(code, r1, r3) ia64_ld2_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_acq(code, r1, r3) ia64_ld4_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_acq(code, r1, r3) ia64_ld8_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld8_fill(code, r1, r3) ia64_ld8_fill_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr(code, r1, r3) ia64_ld1_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr(code, r1, r3) ia64_ld2_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr(code, r1, r3) ia64_ld4_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr(code, r1, r3) ia64_ld8_c_clr_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_nc(code, r1, r3) ia64_ld1_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_nc(code, r1, r3) ia64_ld2_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_nc(code, r1, r3) ia64_ld4_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_nc(code, r1, r3) ia64_ld8_c_nc_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq(code, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr_acq(code, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr_acq(code, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr_acq(code, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld16(code, r1, r3) ia64_ld16_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld16_acq(code, r1, r3) ia64_ld16_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_inc(code, r1, r2, r3) ia64_ld1_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_inc(code, r1, r2, r3) ia64_ld2_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_inc(code, r1, r2, r3) ia64_ld4_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_inc(code, r1, r2, r3) ia64_ld8_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc(code, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_s_inc(code, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_s_inc(code, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_s_inc(code, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc(code, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_a_inc(code, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_a_inc(code, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_a_inc(code, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc(code, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc(code, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc(code, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc(code, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc(code, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc(code, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc(code, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc(code, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc(code, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc(code, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc(code, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc(code, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc(code, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc(code, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc(code, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc(code, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc(code, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc(code, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc(code, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc(code, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc(code, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc(code, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc(code, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc(code, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc(code, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_inc_imm(code, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm(code, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm(code, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm(code, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm(code, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm(code, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm(code, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm(code, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm(code, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm(code, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm(code, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm(code, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm(code, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm(code, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm(code, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm(code, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm(code, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm(code, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm(code, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm(code, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm(code, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm(code, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm(code, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm(code, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm(code, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm(code, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm(code, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm(code, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm(code, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+/* End of pseudo ops */
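+/*
+ * Illustrative expansion (r1/r3 are placeholder registers): the pseudo ops
+ * above fix the predicate to p0 and the locality hint to "none", so
+ *
+ *   ia64_ld8 (code, r1, r3);
+ *
+ * is ia64_ld8_hint_pred (code, 0, r1, r3, 0), which assembles to
+ *
+ *   ld8 r1 = [r3]
+ */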
+
+#define ia64_st1_hint(code, r3, r2, hint) ia64_st1_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_hint(code, r3, r2, hint) ia64_st2_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_hint(code, r3, r2, hint) ia64_st4_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_hint(code, r3, r2, hint) ia64_st8_hint_pred ((code), 0, r3, r2, hint)
+
+/* Pseudo ops */
+#define ia64_st8(code, r3, r2) ia64_st8_hint ((code), (r3), (r2), 0)
+
+#define ia64_st1_rel_hint(code, r3, r2, hint) ia64_st1_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_rel_hint(code, r3, r2, hint) ia64_st2_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_rel_hint(code, r3, r2, hint) ia64_st4_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_rel_hint(code, r3, r2, hint) ia64_st8_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st8_spill_hint(code, r3, r2, hint) ia64_st8_spill_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st1_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st8_spill_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_spill_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+
+#define ia64_ldfs_hint(code, f1, r3, hint) ia64_ldfs_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_hint(code, f1, r3, hint) ia64_ldfd_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_hint(code, f1, r3, hint) ia64_ldf8_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_hint(code, f1, r3, hint) ia64_ldfe_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_s_hint(code, f1, r3, hint) ia64_ldfs_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_s_hint(code, f1, r3, hint) ia64_ldfd_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_s_hint(code, f1, r3, hint) ia64_ldf8_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_s_hint(code, f1, r3, hint) ia64_ldfe_s_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_a_hint(code, f1, r3, hint) ia64_ldfs_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_a_hint(code, f1, r3, hint) ia64_ldfd_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_a_hint(code, f1, r3, hint) ia64_ldf8_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_a_hint(code, f1, r3, hint) ia64_ldfe_a_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_sa_hint(code, f1, r3, hint) ia64_ldfs_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_sa_hint(code, f1, r3, hint) ia64_ldfd_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_sa_hint(code, f1, r3, hint) ia64_ldf8_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_sa_hint(code, f1, r3, hint) ia64_ldfe_sa_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_clr_hint(code, f1, r3, hint) ia64_ldfs_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_clr_hint(code, f1, r3, hint) ia64_ldfd_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_clr_hint(code, f1, r3, hint) ia64_ldf8_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_clr_hint(code, f1, r3, hint) ia64_ldfe_c_clr_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_nc_hint(code, f1, r3, hint) ia64_ldfs_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_nc_hint(code, f1, r3, hint) ia64_ldfd_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_nc_hint(code, f1, r3, hint) ia64_ldf8_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_nc_hint(code, f1, r3, hint) ia64_ldfe_c_nc_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldf_fill_hint(code, f1, r3, hint) ia64_ldf_fill_hint_pred ((code), 0, f1, r3, hint)
+
+
+#define ia64_ldfs_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_s_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_a_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldf_fill_inc_hint(code, f1, r3, r2, hint) ia64_ldf_fill_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+
+#define ia64_ldfs_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldf_fill_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf_fill_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+/* Pseudo ops */
+
+#define ia64_ldfs(code, f1, r3) ia64_ldfs_pred (code, 0, f1, r3)
+#define ia64_ldfd(code, f1, r3) ia64_ldfd_pred (code, 0, f1, r3)
+#define ia64_ldf8(code, f1, r3) ia64_ldf8_pred (code, 0, f1, r3)
+#define ia64_ldfe(code, f1, r3) ia64_ldfe_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_s(code, f1, r3) ia64_ldfs_s_pred (code, 0, f1, r3)
+#define ia64_ldfd_s(code, f1, r3) ia64_ldfd_s_pred (code, 0, f1, r3)
+#define ia64_ldf8_s(code, f1, r3) ia64_ldf8_s_pred (code, 0, f1, r3)
+#define ia64_ldfe_s(code, f1, r3) ia64_ldfe_s_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_a(code, f1, r3) ia64_ldfs_a_pred (code, 0, f1, r3)
+#define ia64_ldfd_a(code, f1, r3) ia64_ldfd_a_pred (code, 0, f1, r3)
+#define ia64_ldf8_a(code, f1, r3) ia64_ldf8_a_pred (code, 0, f1, r3)
+#define ia64_ldfe_a(code, f1, r3) ia64_ldfe_a_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_sa(code, f1, r3) ia64_ldfs_sa_pred (code, 0, f1, r3)
+#define ia64_ldfd_sa(code, f1, r3) ia64_ldfd_sa_pred (code, 0, f1, r3)
+#define ia64_ldf8_sa(code, f1, r3) ia64_ldf8_sa_pred (code, 0, f1, r3)
+#define ia64_ldfe_sa(code, f1, r3) ia64_ldfe_sa_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_clr(code, f1, r3) ia64_ldfs_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_clr(code, f1, r3) ia64_ldfd_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_clr(code, f1, r3) ia64_ldf8_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_clr(code, f1, r3) ia64_ldfe_c_clr_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_nc(code, f1, r3) ia64_ldfs_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_nc(code, f1, r3) ia64_ldfd_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_nc(code, f1, r3) ia64_ldf8_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_nc(code, f1, r3) ia64_ldfe_c_nc_pred (code, 0, f1, r3)
+
+#define ia64_ldf_fill(code, f1, r3) ia64_ldf_fill_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_inc(code, f1, r3, r2) ia64_ldfs_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_inc(code, f1, r3, r2) ia64_ldfd_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_inc(code, f1, r3, r2) ia64_ldf8_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_inc(code, f1, r3, r2) ia64_ldfe_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_s_inc(code, f1, r3, r2) ia64_ldfs_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_s_inc(code, f1, r3, r2) ia64_ldfd_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_s_inc(code, f1, r3, r2) ia64_ldf8_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_s_inc(code, f1, r3, r2) ia64_ldfe_s_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_a_inc(code, f1, r3, r2) ia64_ldfs_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_a_inc(code, f1, r3, r2) ia64_ldfd_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_a_inc(code, f1, r3, r2) ia64_ldf8_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_a_inc(code, f1, r3, r2) ia64_ldfe_a_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_sa_inc(code, f1, r3, r2) ia64_ldfs_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_sa_inc(code, f1, r3, r2) ia64_ldfd_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_sa_inc(code, f1, r3, r2) ia64_ldf8_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_sa_inc(code, f1, r3, r2) ia64_ldfe_sa_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_clr_inc(code, f1, r3, r2) ia64_ldfs_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_clr_inc(code, f1, r3, r2) ia64_ldfd_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_clr_inc(code, f1, r3, r2) ia64_ldf8_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_clr_inc(code, f1, r3, r2) ia64_ldfe_c_clr_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_nc_inc(code, f1, r3, r2) ia64_ldfs_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_nc_inc(code, f1, r3, r2) ia64_ldfd_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_nc_inc(code, f1, r3, r2) ia64_ldf8_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_nc_inc(code, f1, r3, r2) ia64_ldfe_c_nc_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldf_fill_inc(code, f1, r3, r2) ia64_ldf_fill_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_inc_imm(code, f1, r3, imm) ia64_ldfs_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_inc_imm(code, f1, r3, imm) ia64_ldfd_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_inc_imm(code, f1, r3, imm) ia64_ldf8_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_inc_imm(code, f1, r3, imm) ia64_ldfe_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_s_inc_imm(code, f1, r3, imm) ia64_ldfs_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_s_inc_imm(code, f1, r3, imm) ia64_ldfd_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_s_inc_imm(code, f1, r3, imm) ia64_ldf8_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_s_inc_imm(code, f1, r3, imm) ia64_ldfe_s_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_a_inc_imm(code, f1, r3, imm) ia64_ldfs_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_a_inc_imm(code, f1, r3, imm) ia64_ldfd_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_a_inc_imm(code, f1, r3, imm) ia64_ldf8_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_a_inc_imm(code, f1, r3, imm) ia64_ldfe_a_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_sa_inc_imm(code, f1, r3, imm) ia64_ldfs_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_sa_inc_imm(code, f1, r3, imm) ia64_ldfd_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_sa_inc_imm(code, f1, r3, imm) ia64_ldf8_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_sa_inc_imm(code, f1, r3, imm) ia64_ldfe_sa_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_clr_inc_imm(code, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_nc_inc_imm(code, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldf_fill_inc_imm(code, f1, r3, imm) ia64_ldf_fill_inc_imm_pred (code, 0, f1, r3, imm)
+
+/* End of pseudo ops */
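+/*
+ * Illustrative expansion: the floating-point loads follow the same pattern,
+ * e.g. ia64_ldfd (code, f1, r3) is ia64_ldfd_pred (code, 0, f1, r3) and
+ * assembles to
+ *
+ *   ldfd f1 = [r3]
+ */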
+
+#define ia64_stfs_hint(code, r3, f2, hint) ia64_stfs_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfd_hint(code, r3, f2, hint) ia64_stfd_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf8_hint(code, r3, f2, hint) ia64_stf8_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfe_hint(code, r3, f2, hint) ia64_stfe_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf_spill_hint(code, r3, f2, hint) ia64_stf_spill_hint_pred ((code), 0, r3, f2, hint)
+
+
+#define ia64_stfs_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfs_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfd_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfd_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf8_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf8_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfe_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfe_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf_spill_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf_spill_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+
+
+#define ia64_ldfps_hint(code, f1, f2, r3, hint) ia64_ldfps_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_hint(code, f1, f2, r3, hint) ia64_ldfpd_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_hint(code, f1, f2, r3, hint) ia64_ldfp8_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_hint(code, f1, f2, r3, hint) ia64_ldfps_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_hint(code, f1, f2, r3, hint) ia64_ldfps_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+
+#define ia64_ldfps_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_lfetch_hint(code, r3, hint) ia64_lfetch_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_excl_hint(code, r3, hint) ia64_lfetch_excl_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_hint(code, r3, hint) ia64_lfetch_fault_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_excl_hint(code, r3, hint) ia64_lfetch_fault_excl_hint_pred ((code), 0, r3, hint)
+
+
+#define ia64_lfetch_inc_hint(code, r3, r2, hint) ia64_lfetch_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+
+
+#define ia64_lfetch_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+
+
+#define ia64_cmpxchg1_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg1_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg1_hint(code, r1, r3, r2, hint) ia64_xchg1_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg2_hint(code, r1, r3, r2, hint) ia64_xchg2_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg4_hint(code, r1, r3, r2, hint) ia64_xchg4_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg8_hint(code, r1, r3, r2, hint) ia64_xchg8_hint_pred ((code), 0, r1, r3, r2, hint)
+
+#define ia64_fetchadd4_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd4_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd8_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd4_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd4_rel_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd8_rel_hint_pred ((code), 0, r1, r3, inc, hint)
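+/*
+ * Note: fetchadd atomically returns the old memory value in r1 and adds
+ * "inc" to the location; the architecture restricts inc to -16, -8, -4,
+ * -1, 1, 4, 8 or 16.  For example (placeholder registers):
+ *
+ *   ia64_fetchadd8_acq_hint (code, r1, r3, 1, 0);   -> fetchadd8.acq r1 = [r3], 1
+ */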
+
+
+#define ia64_setf_sig(code, f1, r2) ia64_setf_sig_pred ((code), 0, f1, r2)
+#define ia64_setf_exp(code, f1, r2) ia64_setf_exp_pred ((code), 0, f1, r2)
+#define ia64_setf_s(code, f1, r2) ia64_setf_s_pred ((code), 0, f1, r2)
+#define ia64_setf_d(code, f1, r2) ia64_setf_d_pred ((code), 0, f1, r2)
+
+
+#define ia64_getf_sig(code, r1, f2) ia64_getf_sig_pred ((code), 0, r1, f2)
+#define ia64_getf_exp(code, r1, f2) ia64_getf_exp_pred ((code), 0, r1, f2)
+#define ia64_getf_s(code, r1, f2) ia64_getf_s_pred ((code), 0, r1, f2)
+#define ia64_getf_d(code, r1, f2) ia64_getf_d_pred ((code), 0, r1, f2)
+
+
+#define ia64_chk_s_m(code, r2, disp) ia64_chk_s_m_pred ((code), 0, r2, disp)
+
+
+#define ia64_chk_s_float_m(code, f2, disp) ia64_chk_s_float_m_pred ((code), 0, f2, disp)
+
+
+#define ia64_chk_a_nc(code, r1, disp) ia64_chk_a_nc_pred ((code), 0, r1, disp)
+#define ia64_chk_a_clr(code, r1, disp) ia64_chk_a_clr_pred ((code), 0, r1, disp)
+
+
+#define ia64_chk_a_nc_float(code, f1, disp) ia64_chk_a_nc_float_pred ((code), 0, f1, disp)
+#define ia64_chk_a_clr_float(code, f1, disp) ia64_chk_a_clr_float_pred ((code), 0, f1, disp)
+
+
+#define ia64_invala(code) ia64_invala_pred ((code), 0)
+#define ia64_fwb(code) ia64_fwb_pred ((code), 0)
+#define ia64_mf(code) ia64_mf_pred ((code), 0)
+#define ia64_mf_a(code) ia64_mf_a_pred ((code), 0)
+#define ia64_srlz_d(code) ia64_srlz_d_pred ((code), 0)
+#define ia64_srlz_i(code) ia64_srlz_i_pred ((code), 0)
+#define ia64_sync_i(code) ia64_sync_i_pred ((code), 0)
+
+
+#define ia64_flushrs(code) ia64_flushrs_pred ((code), 0)
+#define ia64_loadrs(code) ia64_loadrs_pred ((code), 0)
+
+#define ia64_invala_e(code, r1) ia64_invala_e_pred ((code), 0, r1)
+
+
+#define ia64_invala_e_float(code, f1) ia64_invala_e_float_pred ((code), 0, f1)
+
+
+#define ia64_fc(code, r3) ia64_fc_pred ((code), 0, r3)
+#define ia64_fc_i(code, r3) ia64_fc_i_pred ((code), 0, r3)
+
+
+#define ia64_mov_to_ar_m(code, ar3, r2) ia64_mov_to_ar_m_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_m(code, ar3, imm) ia64_mov_to_ar_imm_m_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_m(code, r1, ar3) ia64_mov_from_ar_m_pred ((code), 0, r1, ar3)
+
+#define ia64_mov_to_cr(code, cr3, r2) ia64_mov_to_cr_pred ((code), 0, cr3, r2)
+
+
+#define ia64_mov_from_cr(code, r1, cr3) ia64_mov_from_cr_pred ((code), 0, r1, cr3)
+
+
+#define ia64_alloc(code, r1, i, l, o, r) ia64_alloc_pred ((code), 0, r1, i, l, o, r)
+
+
+#define ia64_mov_to_psr_l(code, r2) ia64_mov_to_psr_l_pred ((code), 0, r2)
+#define ia64_mov_to_psr_um(code, r2) ia64_mov_to_psr_um_pred ((code), 0, r2)
+
+
+#define ia64_mov_from_psr(code, r1) ia64_mov_from_psr_pred ((code), 0, r1)
+#define ia64_mov_from_psr_um(code, r1) ia64_mov_from_psr_um_pred ((code), 0, r1)
+
+
+#define ia64_break_m(code, imm) ia64_break_m_pred ((code), 0, imm)
+
+/* The System/Memory Management instruction encodings (M38-M47) */
+
+
+#define ia64_nop_m(code, imm) ia64_nop_m_pred ((code), 0, imm)
+#define ia64_hint_m(code, imm) ia64_hint_m_pred ((code), 0, imm)
+
+#define ia64_br_cond_hint(code, disp, bwh, ph, dh) ia64_br_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wexit_hint(code, disp, bwh, ph, dh) ia64_br_wexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wtop_hint(code, disp, bwh, ph, dh) ia64_br_wtop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_br_cloop_hint(code, disp, bwh, ph, dh) ia64_br_cloop_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_cexit_hint(code, disp, bwh, ph, dh) ia64_br_cexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_ctop_hint(code, disp, bwh, ph, dh) ia64_br_ctop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+#define ia64_br_call_hint(code, b1, disp, bwh, ph, dh) ia64_br_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+#define ia64_br_cond_reg_hint(code, b1, bwh, ph, dh) ia64_br_cond_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ia_reg_hint(code, b1, bwh, ph, dh) ia64_br_ia_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ret_reg_hint(code, b1, bwh, ph, dh) ia64_br_ret_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+
+#define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh)
+
+/* Pseudo ops */
+
+#define ia64_br_cond(code, disp) ia64_br_cond_pred (code, 0, disp)
+#define ia64_br_wexit(code, disp) ia64_br_wexit_pred (code, 0, disp)
+#define ia64_br_wtop(code, disp) ia64_br_wtop_pred (code, 0, disp)
+
+#define ia64_br_cloop(code, disp) ia64_br_cloop_pred (code, 0, disp)
+#define ia64_br_cexit(code, disp) ia64_br_cexit_pred (code, 0, disp)
+#define ia64_br_ctop(code, disp) ia64_br_ctop_pred (code, 0, disp)
+
+#define ia64_br_call(code, b1, disp) ia64_br_call_pred (code, 0, b1, disp)
+
+#define ia64_br_cond_reg(code, b1) ia64_br_cond_reg_pred (code, 0, b1)
+#define ia64_br_ia_reg(code, b1) ia64_br_ia_reg_pred (code, 0, b1)
+#define ia64_br_ret_reg(code, b1) ia64_br_ret_reg_pred (code, 0, b1)
+
+#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_pred (code, 0, b1, b2)
+
+/* End of pseudo ops */
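+/*
+ * Illustrative expansion: ia64_br_call (code, b1, disp) is
+ * ia64_br_call_pred (code, 0, b1, disp), a call saving the return link in
+ * branch register b1; the branch/prefetch hints are left to whatever
+ * defaults ia64_br_call_pred supplies.
+ */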
+
+#define ia64_cover(code) ia64_cover_pred ((code), 0)
+#define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0)
+#define ia64_clrrrb_pr(code) ia64_clrrrb_pr_pred ((code), 0)
+#define ia64_rfi(code) ia64_rfi_pred ((code), 0)
+#define ia64_bsw_0(code) ia64_bsw_0_pred ((code), 0)
+#define ia64_bsw_1(code) ia64_bsw_1_pred ((code), 0)
+#define ia64_epc(code) ia64_epc_pred ((code), 0)
+
+
+#define ia64_break_b(code, imm) ia64_break_b_pred ((code), 0, imm)
+#define ia64_nop_b(code, imm) ia64_nop_b_pred ((code), 0, imm)
+#define ia64_hint_b(code, imm) ia64_hint_b_pred ((code), 0, imm)
+
+
+#define ia64_break_x(code, imm) ia64_break_x_pred ((code), 0, imm)
+
+
+#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, (r1), (imm))
+
+
+#define ia64_brl_cond_hint(code, disp, bwh, ph, dh) ia64_brl_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_brl_call_hint(code, b1, disp, bwh, ph, dh) ia64_brl_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+
+#define ia64_nop_x(code, imm) ia64_nop_x_pred ((code), 0, imm)
+#define ia64_hint_x(code, imm) ia64_hint_x_pred ((code), 0, imm)
+
+/*
+ * Pseudo-ops
+ */
+
+#define ia64_mov_pred(code, qp, r1, r3) ia64_adds_imm_pred ((code), (qp), (r1), 0, (r3))
+#define ia64_mov(code, r1, r3) ia64_mov_pred ((code), 0, (r1), (r3))
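+/*
+ * The integer register move is itself a pseudo-op: ia64_mov (code, r1, r3)
+ * expands to ia64_adds_imm_pred (code, 0, r1, 0, r3), i.e. it assembles to
+ *
+ *   adds r1 = 0, r3
+ */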
+
+/*
+ * FLOATING POINT
+ */
+
+#define ia64_fma_sf(code, f1, f3, f4, f2, sf) ia64_fma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_s_sf(code, f1, f3, f4, f2, sf) ia64_fma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_d_sf(code, f1, f3, f4, f2, sf) ia64_fma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpma_sf(code, f1, f3, f4, f2, sf) ia64_fpma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_sf(code, f1, f3, f4, f2, sf) ia64_fms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_s_sf(code, f1, f3, f4, f2, sf) ia64_fms_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_d_sf(code, f1, f3, f4, f2, sf) ia64_fms_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpms_sf(code, f1, f3, f4, f2, sf) ia64_fpms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_sf(code, f1, f3, f4, f2, sf) ia64_fnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_s_sf(code, f1, f3, f4, f2, sf) ia64_fnma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_d_sf(code, f1, f3, f4, f2, sf) ia64_fnma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpnma_sf(code, f1, f3, f4, f2, sf) ia64_fpnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+
+/* Pseudo ops */
+#define ia64_fnorm_s_sf(code, f1, f3, sf) ia64_fnorm_s_sf_pred ((code), 0, (f1), (f3), (sf))
+#define ia64_fnorm_d_sf(code, f1, f3, sf) ia64_fnorm_d_sf_pred ((code), 0, (f1), (f3), (sf))
+
+#define ia64_xma_l(code, f1, f3, f4, f2) ia64_xma_l_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2)
+
+/* Pseudo ops */
+#define ia64_xmpy_l(code, f1, f3, f4) ia64_xmpy_l_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_lu(code, f1, f3, f4) ia64_xmpy_lu_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_h(code, f1, f3, f4) ia64_xmpy_h_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_hu(code, f1, f3, f4) ia64_xmpy_hu_pred ((code), 0, (f1), (f3), (f4))
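+/*
+ * Note: the xmpy forms are xma with f0 as the addend; f0 always reads as
+ * +0.0, so e.g. xmpy.l f1 = f3, f4 is xma.l f1 = f3, f4, f0.
+ */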
+
+#define ia64_fselect(code, f1, f3, f4, f2) ia64_fselect_pred ((code), 0, f1, f3, f4, f2)
+
+#define ia64_fcmp_eq_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_eq_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+/* Pseudo ops */
+#define ia64_fcmp_gt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_gt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ne_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ne_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nlt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nlt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nle_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nle_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ngt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ngt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+#define ia64_fclass_m(code, p1, p2, f2, fclass) ia64_fclass_m_pred ((code), 0, p1, p2, f2, fclass)
+#define ia64_fclass_m_unc(code, p1, p2, f2, fclass) ia64_fclass_m_unc_pred ((code), 0, p1, p2, f2, fclass)
+
+#define ia64_frcpa_sf(code, f1, p2, f2, f3, sf) ia64_frcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+#define ia64_fprcpa_sf(code, f1, p2, f2, f3, sf) ia64_fprcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+
+#define ia64_frsqrta_sf(code, f1, p2, f3, sf) ia64_frsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+#define ia64_fprsqrta_sf(code, f1, p2, f3, sf) ia64_fprsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+
+#define ia64_fmin_sf(code, f1, f2, f3, sf) ia64_fmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fman_sf(code, f1, f2, f3, sf) ia64_fman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famin_sf(code, f1, f2, f3, sf) ia64_famin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famax_sf(code, f1, f2, f3, sf) ia64_famax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpmin_sf(code, f1, f2, f3, sf) ia64_fpmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpman_sf(code, f1, f2, f3, sf) ia64_fpman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamin_sf(code, f1, f2, f3, sf) ia64_fpamin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamax_sf(code, f1, f2, f3, sf) ia64_fpamax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_eq_sf(code, f1, f2, f3, sf) ia64_fpcmp_eq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_lt_sf(code, f1, f2, f3, sf) ia64_fpcmp_lt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_le_sf(code, f1, f2, f3, sf) ia64_fpcmp_le_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_unord_sf(code, f1, f2, f3, sf) ia64_fpcmp_unord_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_neq_sf(code, f1, f2, f3, sf) ia64_fpcmp_neq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nlt_sf(code, f1, f2, f3, sf) ia64_fpcmp_nlt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nle_sf(code, f1, f2, f3, sf) ia64_fpcmp_nle_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_ord_sf(code, f1, f2, f3, sf) ia64_fpcmp_ord_sf_pred ((code), 0, f1, f2, f3, sf)
+
+#define ia64_fmerge_s(code, f1, f2, f3) ia64_fmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_ns(code, f1, f2, f3) ia64_fmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_se(code, f1, f2, f3) ia64_fmerge_se_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_lr(code, f1, f2, f3) ia64_fmix_lr_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_r(code, f1, f2, f3) ia64_fmix_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_l(code, f1, f2, f3) ia64_fmix_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_r(code, f1, f2, f3) ia64_fsxt_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_l(code, f1, f2, f3) ia64_fsxt_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fpack(code, f1, f2, f3) ia64_fpack_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap(code, f1, f2, f3) ia64_fswap_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nl(code, f1, f2, f3) ia64_fswap_nl_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nr(code, f1, f2, f3) ia64_fswap_nr_pred ((code), 0, f1, f2, f3)
+#define ia64_fand(code, f1, f2, f3) ia64_fand_pred ((code), 0, f1, f2, f3)
+#define ia64_fandcm(code, f1, f2, f3) ia64_fandcm_pred ((code), 0, f1, f2, f3)
+#define ia64_for(code, f1, f2, f3) ia64_for_pred ((code), 0, f1, f2, f3)
+#define ia64_fxor(code, f1, f2, f3) ia64_fxor_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_s(code, f1, f2, f3) ia64_fpmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_ns(code, f1, f2, f3) ia64_fpmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_se(code, f1, f2, f3) ia64_fpmerge_se_pred ((code), 0, f1, f2, f3)
+
+/* Pseudo ops */
+#define ia64_fmov(code, f1, f3) ia64_fmov_pred ((code), 0, (f1), (f3))
+
+#define ia64_fcvt_fx_sf(code, f1, f2, sf) ia64_fcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_sf(code, f1, f2, sf) ia64_fcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_sf(code, f1, f2, sf) ia64_fpcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_sf(code, f1, f2, sf) ia64_fpcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+
+#define ia64_fcvt_xf(code, f1, f2) ia64_fcvt_xf_pred ((code), 0, f1, f2)
+
+#define ia64_fsetc_sf(code, amask, omask, sf) ia64_fsetc_sf_pred ((code), 0, amask, omask, sf)
+
+#define ia64_fclrf_sf(code, sf) ia64_fclrf_sf_pred ((code), 0, sf)
+
+#define ia64_fchkf_sf(code, disp, sf) ia64_fchkf_sf_pred ((code), 0, disp, sf)
+
+#define ia64_break_f(code, imm) ia64_break_f_pred ((code), 0, imm)
+
+
+#endif
diff --git a/src/arch/mips/.gitignore b/src/arch/mips/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/src/arch/mips/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/src/arch/mips/Makefile.am b/src/arch/mips/Makefile.am
new file mode 100644
index 0000000..1063365
--- /dev/null
+++ b/src/arch/mips/Makefile.am
@@ -0,0 +1,8 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-mips.la
+
+libmonoarch_mips_la_SOURCES = mips-codegen.h
+
+noinst_PROGRAMS = test
diff --git a/src/arch/mips/mips-codegen.h b/src/arch/mips/mips-codegen.h
new file mode 100644
index 0000000..1dbd1c6
--- /dev/null
+++ b/src/arch/mips/mips-codegen.h
@@ -0,0 +1,435 @@
+#ifndef __MIPS_CODEGEN_H__
+#define __MIPS_CODEGEN_H__
+/*
+ * Copyright (c) 2004 Novell, Inc
+ * Author: Paolo Molaro (lupus@ximian.com)
+ *
+ */
+
+/* registers */
+enum {
+ mips_zero,
+ mips_at, /* assembler temp */
+ mips_v0, /* return values */
+ mips_v1,
+ mips_a0, /* 4 - func arguments */
+ mips_a1,
+ mips_a2,
+ mips_a3,
+#if _MIPS_SIM == _ABIO32
+ mips_t0, /* 8 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+ mips_t4,
+ mips_t5,
+ mips_t6,
+ mips_t7,
+#elif _MIPS_SIM == _ABIN32
+ mips_a4, /* 4 more argument registers */
+ mips_a5,
+ mips_a6,
+ mips_a7,
+ mips_t0, /* 4 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+#endif
+	mips_s0, /* 16 callee saved */
+ mips_s1,
+ mips_s2,
+ mips_s3,
+ mips_s4,
+ mips_s5,
+ mips_s6,
+ mips_s7,
+ mips_t8, /* 24 temps */
+ mips_t9, /* 25 temp / pic call-through register */
+ mips_k0, /* 26 kernel-reserved */
+ mips_k1,
+ mips_gp, /* 28 */
+ mips_sp, /* stack pointer */
+ mips_fp, /* frame pointer */
+ mips_ra /* return address */
+};
+
+/* we treat the register file as containing just doubles... */
+enum {
+ mips_f0, /* return regs */
+ mips_f1,
+ mips_f2,
+ mips_f3,
+ mips_f4, /* temps */
+ mips_f5,
+ mips_f6,
+ mips_f7,
+ mips_f8,
+ mips_f9,
+ mips_f10,
+ mips_f11,
+ mips_f12, /* first arg */
+ mips_f13,
+ mips_f14, /* second arg */
+ mips_f15,
+ mips_f16, /* temps */
+ mips_f17,
+ mips_f18,
+ mips_f19,
+ mips_f20, /* callee saved */
+ mips_f21,
+ mips_f22,
+ mips_f23,
+ mips_f24,
+ mips_f25,
+ mips_f26,
+ mips_f27,
+ mips_f28,
+ mips_f29,
+ mips_f30,
+ mips_f31
+};
+
+/* prefetch hints */
+enum {
+ MIPS_FOR_LOAD,
+ MIPS_FOR_STORE,
+ MIPS_FOR_LOAD_STREAMED = 4,
+ MIPS_FOR_STORE_STREAMED,
+ MIPS_FOR_LOAD_RETAINED,
+ MIPS_FOR_STORE_RETAINED
+};
+
+/* coprocessors */
+enum {
+ MIPS_COP0,
+ MIPS_COP1,
+ MIPS_COP2,
+ MIPS_COP3
+};
+
+enum {
+ MIPS_FMT_SINGLE = 16,
+ MIPS_FMT_DOUBLE = 17,
+ MIPS_FMT_WORD = 20,
+ MIPS_FMT_LONG = 21,
+ MIPS_FMT3_SINGLE = 0,
+ MIPS_FMT3_DOUBLE = 1
+};
+
+/* fpu rounding mode */
+enum {
+ MIPS_ROUND_TO_NEAREST,
+ MIPS_ROUND_TO_ZERO,
+ MIPS_ROUND_TO_POSINF,
+ MIPS_ROUND_TO_NEGINF,
+ MIPS_ROUND_MASK = 3
+};
+
+/* fpu enable/cause flags, cc */
+enum {
+ MIPS_FPU_C_MASK = 1 << 23,
+ MIPS_INEXACT = 1,
+ MIPS_UNDERFLOW = 2,
+ MIPS_OVERFLOW = 4,
+ MIPS_DIVZERO = 8,
+ MIPS_INVALID = 16,
+ MIPS_NOTIMPL = 32,
+ MIPS_FPU_FLAGS_OFFSET = 2,
+ MIPS_FPU_ENABLES_OFFSET = 7,
+ MIPS_FPU_CAUSES_OFFSET = 12
+};
+
+/* fpu condition values - see manual entry for C.cond.fmt instructions */
+enum {
+ MIPS_FPU_F,
+ MIPS_FPU_UN,
+ MIPS_FPU_EQ,
+ MIPS_FPU_UEQ,
+ MIPS_FPU_OLT,
+ MIPS_FPU_ULT,
+ MIPS_FPU_OLE,
+ MIPS_FPU_ULE,
+ MIPS_FPU_SF,
+ MIPS_FPU_NGLE,
+ MIPS_FPU_SEQ,
+ MIPS_FPU_NGL,
+ MIPS_FPU_LT,
+ MIPS_FPU_NGE,
+ MIPS_FPU_LE,
+ MIPS_FPU_NGT
+};
+
+#if SIZEOF_REGISTER == 4
+
+#define MIPS_SW mips_sw
+#define MIPS_LW mips_lw
+#define MIPS_ADDU mips_addu
+#define MIPS_ADDIU mips_addiu
+#define MIPS_SWC1 mips_swc1
+#define MIPS_LWC1 mips_lwc1
+#define MIPS_MOVE mips_move
+
+#elif SIZEOF_REGISTER == 8
+
+#define MIPS_SW mips_sd
+#define MIPS_LW mips_ld
+#define MIPS_ADDU mips_daddu
+#define MIPS_ADDIU mips_daddiu
+#define MIPS_SWC1 mips_sdc1
+#define MIPS_LWC1 mips_ldc1
+#define MIPS_MOVE mips_dmove
+
+#else
+#error Unknown SIZEOF_REGISTER
+#endif
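+
+/* With these, e.g. MIPS_LW (p, mips_v0, mips_sp, 16) assembles as "lw"
+ * on 32-bit-register builds and as "ld" on 64-bit ones. */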
+
+#define mips_emit32(c,x) do { \
+ *((guint32 *) (void *)(c)) = x; \
+ (c) = (typeof(c))(((guint32 *)(void *)(c)) + 1); \
+ } while (0)
+
+#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((imm)&0xffff)))
+#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|((imm)&0x03ffffff)))
+#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func)))
+#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun)))
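+
+/* For example, mips_addu (p, mips_v0, mips_a0, mips_a1) expands to
+ * mips_format_r (p, 0, 4, 5, 2, 0, 33) and emits the word 0x00851021,
+ * i.e. "addu v0,a0,a1". */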
+
+#define mips_is_imm16(val) ((gint)(gshort)(gint)(val) == (gint)(val))
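+/* e.g. 0x7fff survives the sign-extended 16-bit round trip and passes,
+ * while 0x8000 sign-extends to 0xffff8000 and fails */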
+
+/* Always load with a lui/addiu pair, so the value can be patched later */
+#define mips_load(c,D,v) do { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } while (0)
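+
+/* For example, mips_load (p, mips_t0, 0x12348765) emits
+ * "lui t0,0x1235; addiu t0,t0,0x8765": addiu sign-extends its immediate
+ * (-0x789b here), so the high half is loaded plus one to compensate,
+ * leaving 0x12348765 in t0. */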
+
+/* load constant - no patch-up */
+#define mips_load_const(c,D,v) do { \
+ if (!mips_is_imm16 ((v))) { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ if (((guint32)(v)) & 0xffff) \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } \
+ else \
+ mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \
+ } while (0)
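+
+/* Unlike mips_load, this can emit a single instruction, e.g.
+ * mips_load_const (p, mips_t0, 42) is just "addiu t0,zero,42". */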
+
+/* arithmetic ops */
+#define mips_add(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,32)
+#define mips_addi(c,dest,src1,imm) mips_format_i(c,8,src1,dest,imm)
+#define mips_addu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,33)
+#define mips_addiu(c,dest,src1,imm) mips_format_i(c,9,src1,dest,imm)
+#define mips_dadd(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,44)
+#define mips_daddi(c,dest,src1,imm) mips_format_i(c,24,src1,dest,imm)
+#define mips_daddu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,45)
+#define mips_daddiu(c,dest,src1,imm) mips_format_i(c,25,src1,dest,imm)
+#define mips_dsub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,46)
+#define mips_dsubu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,47)
+#define mips_mul(c,dest,src1,src2) mips_format_r(c,28,src1,src2,dest,0,2)
+#define mips_sub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,34)
+#define mips_subu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,35)
+
+/* div and mul ops */
+#define mips_ddiv(c,src1,src2) mips_format_divmul(c,0,src1,src2,30)
+#define mips_ddivu(c,src1,src2) mips_format_divmul(c,0,src1,src2,31)
+#define mips_div(c,src1,src2) mips_format_divmul(c,0,src1,src2,26)
+#define mips_divu(c,src1,src2) mips_format_divmul(c,0,src1,src2,27)
+#define mips_dmult(c,src1,src2) mips_format_divmul(c,0,src1,src2,28)
+#define mips_dmultu(c,src1,src2) mips_format_divmul(c,0,src1,src2,29)
+#define mips_mult(c,src1,src2) mips_format_divmul(c,0,src1,src2,24)
+#define mips_multu(c,src1,src2) mips_format_divmul(c,0,src1,src2,25)
+
+/* shift ops */
+#define mips_dsll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,56)
+#define mips_dsll32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,60)
+#define mips_dsllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,20)
+#define mips_dsra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,59)
+#define mips_dsra32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,63)
+#define mips_dsrav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,23)
+#define mips_dsrl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,58)
+#define mips_dsrl32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,62)
+#define mips_dsrlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,22)
+#define mips_sll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,0)
+#define mips_sllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,4)
+#define mips_sra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,3)
+#define mips_srav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,7)
+#define mips_srl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,2)
+#define mips_srlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,6)
+
+/* logical ops */
+#define mips_and(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,36)
+#define mips_andi(c,dest,src1,imm) mips_format_i(c,12,src1,dest,imm)
+#define mips_nor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,39)
+#define mips_or(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,37)
+#define mips_ori(c,dest,src1,uimm) mips_format_i(c,13,src1,dest,uimm)
+#define mips_xor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,38)
+#define mips_xori(c,dest,src1,uimm) mips_format_i(c,14,src1,dest,uimm)
+
+/* compares */
+#define mips_slt(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,42)
+#define mips_slti(c,dest,src1,imm) mips_format_i(c,10,src1,dest,imm)
+#define mips_sltiu(c,dest,src1,imm) mips_format_i(c,11,src1,dest,imm)
+#define mips_sltu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,43)
+/* missing traps: teq, teqi, tge, tgei, tgeiu, tgeu, tlt, tlti, tltiu, tltu, tne, tnei, */
+
+/* conditional branches */
+#define mips_beq(c,src1,src2,offset) mips_format_i(c,4,src1,src2,offset)
+#define mips_beql(c,src1,src2,offset) mips_format_i(c,20,src1,src2,offset)
+#define mips_bgez(c,src1,offset) mips_format_i(c,1,src1,1,offset)
+#define mips_bgezal(c,src1,offset) mips_format_i(c,1,src1,17,offset)
+#define mips_bgezall(c,src1,offset) mips_format_i(c,1,src1,19,offset)
+#define mips_bgezl(c,src1,offset) mips_format_i(c,1,src1,3,offset)
+#define mips_bgtz(c,src1,offset) mips_format_i(c,7,src1,0,offset)
+#define mips_bgtzl(c,src1,offset) mips_format_i(c,23,src1,0,offset)
+#define mips_blez(c,src1,offset) mips_format_i(c,6,src1,0,offset)
+#define mips_blezl(c,src1,offset) mips_format_i(c,22,src1,0,offset)
+#define mips_bltz(c,src1,offset) mips_format_i(c,1,src1,0,offset)
+#define mips_bltzal(c,src1,offset) mips_format_i(c,1,src1,16,offset)
+#define mips_bltzall(c,src1,offset) mips_format_i(c,1,src1,18,offset)
+#define mips_bltzl(c,src1,offset) mips_format_i(c,1,src1,2,offset)
+#define mips_bne(c,src1,src2,offset) mips_format_i(c,5,src1,src2,offset)
+#define mips_bnel(c,src1,src2,offset) mips_format_i(c,21,src1,src2,offset)
+
+/* uncond branches and calls */
+#define mips_jump(c,target) mips_format_j(c,2,target)
+#define mips_jumpl(c,target) mips_format_j(c,3,target)
+#define mips_jalr(c,src1,retreg) mips_format_r(c,0,src1,0,retreg,0,9)
+#define mips_jr(c,src1) mips_emit32(c,((src1)<<21)|8)
+
+/* loads and stores */
+#define mips_lb(c,dest,base,offset) mips_format_i(c,32,base,dest,offset)
+#define mips_lbu(c,dest,base,offset) mips_format_i(c,36,base,dest,offset)
+#define mips_ld(c,dest,base,offset) mips_format_i(c,55,base,dest,offset)
+#define mips_ldl(c,dest,base,offset) mips_format_i(c,26,base,dest,offset)
+#define mips_ldr(c,dest,base,offset) mips_format_i(c,27,base,dest,offset)
+#define mips_lh(c,dest,base,offset) mips_format_i(c,33,base,dest,offset)
+#define mips_lhu(c,dest,base,offset) mips_format_i(c,37,base,dest,offset)
+#define mips_ll(c,dest,base,offset) mips_format_i(c,48,base,dest,offset)
+#define mips_lld(c,dest,base,offset) mips_format_i(c,52,base,dest,offset)
+#define mips_lui(c,dest,base,uimm) mips_format_i(c,15,base,dest,uimm)
+#define mips_lw(c,dest,base,offset) mips_format_i(c,35,base,dest,offset)
+#define mips_lwl(c,dest,base,offset) mips_format_i(c,34,base,dest,offset)
+#define mips_lwr(c,dest,base,offset) mips_format_i(c,38,base,dest,offset)
+#define mips_lwu(c,dest,base,offset) mips_format_i(c,39,base,dest,offset)
+
+#define mips_sb(c,src,base,offset) mips_format_i(c,40,base,src,offset)
+#define mips_sc(c,src,base,offset) mips_format_i(c,56,base,src,offset)
+#define mips_scd(c,src,base,offset) mips_format_i(c,60,base,src,offset)
+#define mips_sd(c,src,base,offset) mips_format_i(c,63,base,src,offset)
+#define mips_sdl(c,src,base,offset) mips_format_i(c,44,base,src,offset)
+#define mips_sdr(c,src,base,offset) mips_format_i(c,45,base,src,offset)
+#define mips_sh(c,src,base,offset) mips_format_i(c,41,base,src,offset)
+#define mips_sw(c,src,base,offset) mips_format_i(c,43,base,src,offset)
+#define mips_swl(c,src,base,offset) mips_format_i(c,50,base,src,offset)
+#define mips_swr(c,src,base,offset) mips_format_i(c,54,base,src,offset)
+
+/* misc and coprocessor ops */
+#define mips_move(c,dest,src) mips_addu(c,dest,src,mips_zero)
+#define mips_dmove(c,dest,src) mips_daddu(c,dest,src,mips_zero)
+#define mips_nop(c) mips_or(c,mips_at,mips_at,0)
+#define mips_break(c,code) mips_emit32(c, ((code)<<6)|13)
+#define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16)
+#define mips_mflo(c,dest) mips_format_r(c,0,0,0,dest,0,18)
+#define mips_mthi(c,src) mips_format_r(c,0,src,0,0,0,17)
+#define mips_mtlo(c,src) mips_format_r(c,0,src,0,0,0,19)
+#define mips_movn(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,11)
+#define mips_movz(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,10)
+#define mips_pref(c,hint,base,offset) mips_format_i(c,51,base,hint,offset)
+#define mips_prefidx(c,hint,base,idx) mips_format_r(c,19,base,idx,hint,0,15)
+#define mips_sync(c,stype) mips_emit32(c, ((stype)<<6)|15)
+#define mips_syscall(c,code) mips_emit32(c, ((code)<<6)|12)
+
+#define mips_cop(c,cop,fun) mips_emit32(c, ((16|(cop))<<26)|(fun))
+#define mips_ldc(c,cop,dest,base,offset) mips_format_i(c,(52|(cop)),base,dest,offset)
+#define mips_lwc(c,cop,dest,base,offset) mips_format_i(c,(48|(cop)),base,dest,offset)
+#define mips_sdc(c,cop,src,base,offset) mips_format_i(c,(60|(cop)),base,src,offset)
+#define mips_swc(c,cop,src,base,offset) mips_format_i(c,(56|(cop)),base,src,offset)
+#define mips_cfc1(c,dest,src) mips_format_r(c,17,2,dest,src,0,0)
+#define mips_ctc1(c,dest,src) mips_format_r(c,17,6,dest,src,0,0)
+
+/* fpu ops */
+#define mips_fabss(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,5)
+#define mips_fabsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,5)
+#define mips_fadds(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,0)
+#define mips_faddd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,0)
+#define mips_fdivs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,3)
+#define mips_fdivd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,3)
+#define mips_fmuls(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,2)
+#define mips_fmuld(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,2)
+#define mips_fnegs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,7)
+#define mips_fnegd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,7)
+#define mips_fsqrts(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,4)
+#define mips_fsqrtd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,4)
+#define mips_fsubs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,1)
+#define mips_fsubd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,1)
+#define mips_madds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_SINGLE)
+#define mips_maddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_DOUBLE)
+#define mips_nmadds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_SINGLE)
+#define mips_nmaddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_DOUBLE)
+#define mips_msubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_SINGLE)
+#define mips_msubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_DOUBLE)
+#define mips_nmsubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_SINGLE)
+#define mips_nmsubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_DOUBLE)
+
+/* fp compare and branch */
+#define mips_fcmps(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fcmpd(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fbfalse(c,offset) mips_format_i(c,17,8,0,offset)
+#define mips_fbfalsel(c,offset) mips_format_i(c,17,8,2,offset)
+#define mips_fbtrue(c,offset) mips_format_i(c,17,8,1,offset)
+#define mips_fbtruel(c,offset) mips_format_i(c,17,8,3,offset)
+
+/* fp convert */
+#define mips_ceills(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,10)
+#define mips_ceilld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,10)
+#define mips_ceilws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,14)
+#define mips_ceilwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,14)
+#define mips_cvtds(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,33)
+#define mips_cvtdw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,33)
+#define mips_cvtdl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,33)
+#define mips_cvtls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,37)
+#define mips_cvtld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,37)
+#define mips_cvtsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,32)
+#define mips_cvtsw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,32)
+#define mips_cvtsl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,32)
+#define mips_cvtws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,36)
+#define mips_cvtwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,36)
+#define mips_floorls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,11)
+#define mips_floorld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,11)
+#define mips_floorws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,15)
+#define mips_floorwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,15)
+#define mips_roundls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,8)
+#define mips_roundld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,8)
+#define mips_roundws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,12)
+#define mips_roundwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,12)
+#define mips_truncls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,9)
+#define mips_truncld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,9)
+#define mips_truncws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,13)
+#define mips_truncwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,13)
+
+/* fp moves, loads */
+#define mips_fmovs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,6)
+#define mips_fmovd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,6)
+#define mips_mfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0)
+#define mips_mtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0)
+#define mips_dmfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0)
+#define mips_dmtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0)
+#define mips_ldc1(c,dest,base,offset) mips_ldc(c,1,dest,base,offset)
+#define mips_ldxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,1)
+#define mips_lwc1(c,dest,base,offset) mips_lwc(c,1,dest,base,offset)
+#define mips_lwxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,0)
+#define mips_sdc1(c,src,base,offset) mips_sdc(c,1,src,base,offset)
+#define mips_sdxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,9)
+#define mips_swc1(c,src,base,offset) mips_swc(c,1,src,base,offset)
+#define mips_swxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,8)
+
+#endif /* __MIPS_CODEGEN_H__ */
+
diff --git a/src/arch/mips/test.c b/src/arch/mips/test.c
new file mode 100644
index 0000000..4f5e1ad
--- /dev/null
+++ b/src/arch/mips/test.c
@@ -0,0 +1,159 @@
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#define NO_MIPS_JIT_DEBUG
+
+#include "mips-codegen.h"
+#include "mono/metadata/class.h"
+
+/* don't run the resulting program, it will destroy your computer,
+ * just objdump -d it to check that we generated the correct assembly.
+ */
+
+int main (int argc, char *argv[]) {
+ guint32 *code, * p;
+
+ code = p = (guint32 *) malloc (sizeof (guint32) * 1024);
+
+ mips_add (p, 3, 4, 5);
+ mips_addi (p, 3, 4, 5);
+ mips_addu (p, 3, 4, 5);
+ mips_addiu (p, 3, 4, 5);
+ mips_sub (p, 3, 4, 5);
+ mips_subu (p, 3, 4, 5);
+ mips_dadd (p, 3, 4, 5);
+ mips_daddi (p, 3, 4, 5);
+ mips_daddu (p, 3, 4, 5);
+ mips_daddiu (p, 3, 4, 5);
+ mips_dsub (p, 3, 4, 5);
+ mips_dsubu (p, 3, 4, 5);
+
+ mips_mult (p, 6, 7);
+ mips_multu (p, 6, 7);
+ mips_div (p, 6, 7);
+ mips_divu (p, 6, 7);
+ mips_dmult (p, 6, 7);
+ mips_dmultu (p, 6, 7);
+ mips_ddiv (p, 6, 7);
+ mips_ddivu (p, 6, 7);
+
+ mips_sll (p, 3, 4, 5);
+ mips_sllv (p, 3, 4, 5);
+ mips_sra (p, 3, 4, 5);
+ mips_srav (p, 3, 4, 5);
+ mips_srl (p, 3, 4, 5);
+ mips_srlv (p, 3, 4, 5);
+ mips_dsll (p, 3, 4, 5);
+ mips_dsll32 (p, 3, 4, 5);
+ mips_dsllv (p, 3, 4, 5);
+ mips_dsra (p, 3, 4, 5);
+ mips_dsra32 (p, 3, 4, 5);
+ mips_dsrav (p, 3, 4, 5);
+ mips_dsrl (p, 3, 4, 5);
+ mips_dsrl32 (p, 3, 4, 5);
+ mips_dsrlv (p, 3, 4, 5);
+
+ mips_and (p, 8, 9, 10);
+ mips_andi (p, 8, 9, 10);
+ mips_nor (p, 8, 9, 10);
+ mips_or (p, 8, 9, 10);
+ mips_ori (p, 8, 9, 10);
+ mips_xor (p, 8, 9, 10);
+ mips_xori (p, 8, 9, 10);
+
+ mips_slt (p, 8, 9, 10);
+ mips_slti (p, 8, 9, 10);
+ mips_sltu (p, 8, 9, 10);
+ mips_sltiu (p, 8, 9, 10);
+
+ mips_beq (p, 8, 9, 0xff1f);
+ mips_beql (p, 8, 9, 0xff1f);
+ mips_bne (p, 8, 9, 0xff1f);
+ mips_bnel (p, 8, 9, 0xff1f);
+ mips_bgez (p, 11, 0xff1f);
+ mips_bgezal (p, 11, 0xff1f);
+ mips_bgezall (p, 11, 0xff1f);
+ mips_bgezl (p, 11, 0xff1f);
+ mips_bgtz (p, 11, 0xff1f);
+ mips_bgtzl (p, 11, 0xff1f);
+ mips_blez (p, 11, 0xff1f);
+ mips_blezl (p, 11, 0xff1f);
+ mips_bltz (p, 11, 0xff1f);
+ mips_bltzal (p, 11, 0xff1f);
+ mips_bltzall (p, 11, 0xff1f);
+ mips_bltzl (p, 11, 0xff1f);
+
+ mips_jump (p, 0xff1f);
+ mips_jumpl (p, 0xff1f);
+ mips_jalr (p, 12, mips_ra);
+ mips_jr (p, 12);
+
+ mips_lb (p, 13, 14, 128);
+ mips_lbu (p, 13, 14, 128);
+ mips_ld (p, 13, 14, 128);
+ mips_ldl (p, 13, 14, 128);
+ mips_ldr (p, 13, 14, 128);
+ mips_lh (p, 13, 14, 128);
+ mips_lhu (p, 13, 14, 128);
+ mips_ll (p, 13, 14, 128);
+ mips_lld (p, 13, 14, 128);
+ mips_lui (p, 13, 14, 128);
+ mips_lw (p, 13, 14, 128);
+ mips_lwl (p, 13, 14, 128);
+ mips_lwr (p, 13, 14, 128);
+ mips_lwu (p, 13, 14, 128);
+ mips_sb (p, 13, 14, 128);
+ mips_sc (p, 13, 14, 128);
+ mips_scd (p, 13, 14, 128);
+ mips_sd (p, 13, 14, 128);
+ mips_sdl (p, 13, 14, 128);
+ mips_sdr (p, 13, 14, 128);
+ mips_sh (p, 13, 14, 128);
+ mips_sw (p, 13, 14, 128);
+ mips_swl (p, 13, 14, 128);
+ mips_swr (p, 13, 14, 128);
+
+ mips_move (p, 15, 16);
+ mips_nop (p);
+ mips_break (p, 0);
+ mips_sync (p, 0);
+ mips_mfhi (p, 17);
+ mips_mflo (p, 17);
+ mips_mthi (p, 17);
+ mips_mtlo (p, 17);
+
+ mips_fabsd (p, 16, 18);
+ mips_fnegd (p, 16, 18);
+ mips_fsqrtd (p, 16, 18);
+ mips_faddd (p, 16, 18, 20);
+ mips_fdivd (p, 16, 18, 20);
+ mips_fmuld (p, 16, 18, 20);
+ mips_fsubd (p, 16, 18, 20);
+
+ mips_fcmpd (p, MIPS_FPU_EQ, 18, 20);
+ mips_fbfalse (p, 0xff1f);
+ mips_fbfalsel (p, 0xff1f);
+ mips_fbtrue (p, 0xff1f);
+ mips_fbtruel (p, 0xff1f);
+
+ mips_ceilwd (p, 20, 22);
+ mips_ceilld (p, 20, 22);
+ mips_floorwd (p, 20, 22);
+ mips_floorld (p, 20, 22);
+ mips_roundwd (p, 20, 22);
+ mips_roundld (p, 20, 22);
+ mips_truncwd (p, 20, 22);
+ mips_truncld (p, 20, 22);
+ mips_cvtdw (p, 20, 22);
+ mips_cvtds (p, 20, 22);
+ mips_cvtdl (p, 20, 22);
+ mips_cvtld (p, 20, 22);
+ mips_cvtsd (p, 20, 22);
+ mips_cvtwd (p, 20, 22);
+
+ mips_fmovd (p, 20, 22);
+	printf ("size: %d\n", (int)(p - code));
+
+ return 0;
+}
diff --git a/src/arch/ppc/.gitignore b/src/arch/ppc/.gitignore
new file mode 100644
index 0000000..c577ff6
--- /dev/null
+++ b/src/arch/ppc/.gitignore
@@ -0,0 +1,7 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
+/test
diff --git a/src/arch/ppc/Makefile.am b/src/arch/ppc/Makefile.am
new file mode 100644
index 0000000..9b209ef
--- /dev/null
+++ b/src/arch/ppc/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = ppc-codegen.h
\ No newline at end of file
diff --git a/src/arch/ppc/ppc-codegen.h b/src/arch/ppc/ppc-codegen.h
new file mode 100644
index 0000000..55b5060
--- /dev/null
+++ b/src/arch/ppc/ppc-codegen.h
@@ -0,0 +1,953 @@
+/*
+ Authors:
+ Radek Doulik
+ Christopher Taylor <ct_AT_clemson_DOT_edu>
+ Andreas Faerber <andreas.faerber@web.de>
+
+ Copyright (C) 2001 Radek Doulik
+ Copyright (C) 2007-2008 Andreas Faerber
+
+ for testing do the following: ./test | as -o test.o
+*/
+
+#ifndef __MONO_PPC_CODEGEN_H__
+#define __MONO_PPC_CODEGEN_H__
+#include <glib.h>
+#include <assert.h>
+
+typedef enum {
+ ppc_r0 = 0,
+ ppc_r1,
+ ppc_sp = ppc_r1,
+ ppc_r2,
+ ppc_r3,
+ ppc_r4,
+ ppc_r5,
+ ppc_r6,
+ ppc_r7,
+ ppc_r8,
+ ppc_r9,
+ ppc_r10,
+ ppc_r11,
+ ppc_r12,
+ ppc_r13,
+ ppc_r14,
+ ppc_r15,
+ ppc_r16,
+ ppc_r17,
+ ppc_r18,
+ ppc_r19,
+ ppc_r20,
+ ppc_r21,
+ ppc_r22,
+ ppc_r23,
+ ppc_r24,
+ ppc_r25,
+ ppc_r26,
+ ppc_r27,
+ ppc_r28,
+ ppc_r29,
+ ppc_r30,
+ ppc_r31
+} PPCIntRegister;
+
+typedef enum {
+ ppc_f0 = 0,
+ ppc_f1,
+ ppc_f2,
+ ppc_f3,
+ ppc_f4,
+ ppc_f5,
+ ppc_f6,
+ ppc_f7,
+ ppc_f8,
+ ppc_f9,
+ ppc_f10,
+ ppc_f11,
+ ppc_f12,
+ ppc_f13,
+ ppc_f14,
+ ppc_f15,
+ ppc_f16,
+ ppc_f17,
+ ppc_f18,
+ ppc_f19,
+ ppc_f20,
+ ppc_f21,
+ ppc_f22,
+ ppc_f23,
+ ppc_f24,
+ ppc_f25,
+ ppc_f26,
+ ppc_f27,
+ ppc_f28,
+ ppc_f29,
+ ppc_f30,
+ ppc_f31
+} PPCFloatRegister;
+
+typedef enum {
+ ppc_lr = 256,
+ ppc_ctr = 256 + 32,
+ ppc_xer = 32
+} PPCSpecialRegister;
+
+enum {
+ /* B0 operand for branches */
+ PPC_BR_DEC_CTR_NONZERO_FALSE = 0,
+ PPC_BR_LIKELY = 1, /* can be or'ed with the conditional variants */
+ PPC_BR_DEC_CTR_ZERO_FALSE = 2,
+ PPC_BR_FALSE = 4,
+ PPC_BR_DEC_CTR_NONZERO_TRUE = 8,
+ PPC_BR_DEC_CTR_ZERO_TRUE = 10,
+ PPC_BR_TRUE = 12,
+ PPC_BR_DEC_CTR_NONZERO = 16,
+ PPC_BR_DEC_CTR_ZERO = 18,
+ PPC_BR_ALWAYS = 20,
+ /* B1 operand for branches */
+ PPC_BR_LT = 0,
+ PPC_BR_GT = 1,
+ PPC_BR_EQ = 2,
+ PPC_BR_SO = 3
+};
+
+enum {
+ PPC_TRAP_LT = 1,
+ PPC_TRAP_GT = 2,
+ PPC_TRAP_EQ = 4,
+ PPC_TRAP_LT_UN = 8,
+ PPC_TRAP_GT_UN = 16,
+ PPC_TRAP_LE = 1 + PPC_TRAP_EQ,
+ PPC_TRAP_GE = 2 + PPC_TRAP_EQ,
+ PPC_TRAP_LE_UN = 8 + PPC_TRAP_EQ,
+ PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ
+};
+
+#define ppc_emit32(c,x) do { *((guint32 *) (c)) = GUINT32_TO_BE (x); (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0)
+
+#define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1))
+#define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L)
+#define ppc_ha(val) (((val >> 16) + ((val & 0x8000) ? 1 : 0)) & 0xffff)
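+/* ppc_ha() returns the high half adjusted for a sign-extending low-half
+ * add, e.g. ppc_ha(0x12348765) == 0x1235, so "addis rD,rA,high" followed
+ * by "addi rD,rD,low" reconstructs the full value. */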
+
+#define ppc_load32(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), (guint32)(v) >> 16); \
+ ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \
+ } G_STMT_END
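+
+/* For example, ppc_load32 (c, ppc_r3, 0xdeadbeef) emits
+ * "lis r3,0xdead; ori r3,r3,0xbeef"; ori zero-extends its immediate,
+ * so no high-half adjustment is needed here. */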
+
+/* Macros to load/store pointer sized quantities */
+
+#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
+
+#define ppc_ldptr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+
+#else
+
+/* Same as ppc32 */
+#define ppc_ldptr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+
+#endif
+
+/* Macros to load pointer sized immediates */
+#define ppc_load_ptr(c,D,v) ppc_load ((c),(D),(gsize)(v))
+#define ppc_load_ptr_sequence(c,D,v) ppc_load_sequence ((c),(D),(gsize)(v))
+
+/* Macros to load/store regsize quantities */
+
+#ifdef __mono_ppc64__
+#define ppc_ldr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+#else
+#define ppc_ldr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+#endif
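+
+/* E.g. ppc_str (c, ppc_r3, -8, ppc_r1) becomes "std r3,-8(r1)" on
+ * 64-bit builds and "stw r3,-8(r1)" on 32-bit ones. */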
+
+#define ppc_str_multiple(c,S,d,A) ppc_store_multiple_regs((c),(S),(d),(A))
+#define ppc_ldr_multiple(c,D,d,A) ppc_load_multiple_regs((c),(D),(d),(A))
+
+/* PPC32 macros */
+
+#ifndef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v))
+
+#define PPC_LOAD_SEQUENCE_LENGTH 8
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint32)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint32)(v)); \
+ } else { \
+ ppc_load32 ((c), (D), (guint32)(v)); \
+ } \
+ } G_STMT_END
+
+#define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V))
+
+#define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A))
+
+#define ppc_store_multiple_regs(c,S,d,A) ppc_stmw ((c), (S), (d), (A))
+
+#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B))
+#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B))
+#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 0, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_slw((c), (S), (A), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_slwi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srwi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_srawi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mullw((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrwi((c), (A), (S), (n))
+
+#endif
+
+#define ppc_opcode(c) ((c) >> 26)
+#define ppc_split_5_1_1(x) (((x) >> 5) & 0x1)
+#define ppc_split_5_1_5(x) ((x) & 0x1F)
+#define ppc_split_5_1(x) ((ppc_split_5_1_5(x) << 1) | ppc_split_5_1_1(x))
+
+#define ppc_break(c) ppc_tw((c),31,0,0)
+#define ppc_addi(c,D,A,i) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addis(c,D,A,i) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v))
+#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v))
+#define ppc_lwz(c,D,d,A) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lhz(c,D,d,A) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lbz(c,D,d,A) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stw(c,S,d,A) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_sth(c,S,d,A) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stb(c,S,d,A) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stwu(c,s,d,A) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888)
+#define ppc_mr(c,a,s) ppc_or (c, a, s, s)
+#define ppc_ori(c,S,A,ui) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(ui))
+#define ppc_nop(c) ppc_ori (c, 0, 0, 0)
+#define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1))
+#define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr)
+#define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1))
+#define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S)
+#define ppc_mtctr(c,S) ppc_mtspr (c, ppc_ctr, S)
+#define ppc_mtxer(c,S) ppc_mtspr (c, ppc_xer, S)
+
+#define ppc_b(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2))
+#define ppc_bl(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 1)
+#define ppc_ba(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 2)
+#define ppc_bla(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 3)
+#define ppc_blrl(c) ppc_emit32 (c, 0x4e800021)
+#define ppc_blr(c) ppc_emit32 (c, 0x4e800020)
+
+#define ppc_lfs(c,D,d,A) ppc_emit32 (c, (48 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lfd(c,D,d,A) ppc_emit32 (c, (50 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stfs(c,S,d,a) ppc_emit32 (c, (52 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+#define ppc_stfd(c,S,d,a) ppc_emit32 (c, (54 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+
+/***********************************************************************
+The macros below were tapped out by Christopher Taylor <ct_AT_clemson_DOT_edu>
+from 18 November 2002 to 19 December 2002.
+
+Special thanks to rodo, lupus, dietmar, miguel, and duncan for patience,
+and motivation.
+
+The macros found in this file are based on the assembler instructions found
+in Motorola and Digital DNA's:
+
+"Programming Enviornments Manual For 32-bit Implementations of the PowerPC Architecture"
+
+MPCFPE32B/AD
+12/2001
+REV2
+
+see pages 326 - 524 for detailed information regarding each instruction
+
+Also see the "Ximian Copyright Agreement, 2002" for more information regarding
+my and Ximian's copyright to this code. ;)
+*************************************************************************/
+
+#define ppc_addx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (266 << 1) | Rc)
+#define ppc_add(c,D,A,B) ppc_addx(c,D,A,B,0,0)
+#define ppc_addd(c,D,A,B) ppc_addx(c,D,A,B,0,1)
+#define ppc_addo(c,D,A,B) ppc_addx(c,D,A,B,1,0)
+#define ppc_addod(c,D,A,B) ppc_addx(c,D,A,B,1,1)
+
+#define ppc_addcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (10 << 1) | Rc)
+#define ppc_addc(c,D,A,B) ppc_addcx(c,D,A,B,0,0)
+#define ppc_addcd(c,D,A,B) ppc_addcx(c,D,A,B,0,1)
+#define ppc_addco(c,D,A,B) ppc_addcx(c,D,A,B,1,0)
+#define ppc_addcod(c,D,A,B) ppc_addcx(c,D,A,B,1,1)
+
+#define ppc_addex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (138 << 1) | Rc)
+#define ppc_adde(c,D,A,B) ppc_addex(c,D,A,B,0,0)
+#define ppc_added(c,D,A,B) ppc_addex(c,D,A,B,0,1)
+#define ppc_addeo(c,D,A,B) ppc_addex(c,D,A,B,1,0)
+#define ppc_addeod(c,D,A,B) ppc_addex(c,D,A,B,1,1)
+
+#define ppc_addic(c,D,A,i) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addicd(c,D,A,i) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+
+#define ppc_addmex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (234 << 1) | RC)
+#define ppc_addme(c,D,A) ppc_addmex(c,D,A,0,0)
+#define ppc_addmed(c,D,A) ppc_addmex(c,D,A,0,1)
+#define ppc_addmeo(c,D,A) ppc_addmex(c,D,A,1,0)
+#define ppc_addmeod(c,D,A) ppc_addmex(c,D,A,1,1)
+
+#define ppc_addzex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (202 << 1) | RC)
+#define ppc_addze(c,D,A) ppc_addzex(c,D,A,0,0)
+#define ppc_addzed(c,D,A) ppc_addzex(c,D,A,0,1)
+#define ppc_addzeo(c,D,A) ppc_addzex(c,D,A,1,0)
+#define ppc_addzeod(c,D,A) ppc_addzex(c,D,A,1,1)
+
+#define ppc_andx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (28 << 1) | RC)
+#define ppc_and(c,S,A,B) ppc_andx(c,S,A,B,0)
+#define ppc_andd(c,S,A,B) ppc_andx(c,S,A,B,1)
+
+#define ppc_andcx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (60 << 1) | RC)
+#define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0)
+#define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1)
+
+#define ppc_andid(c,S,A,ui) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+#define ppc_andisd(c,S,A,ui) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+
+#define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK)
+#define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0)
+#define ppc_bca(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,0)
+#define ppc_bcl(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,1)
+#define ppc_bcla(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,1)
+
+#define ppc_bcctrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (528 << 1) | LK)
+#define ppc_bcctr(c,BO,BI) ppc_bcctrx(c,BO,BI,0)
+#define ppc_bcctrl(c,BO,BI) ppc_bcctrx(c,BO,BI,1)
+
+#define ppc_bnectrp(c,BO,BI) ppc_bcctr(c,BO,BI)
+#define ppc_bnectrlp(c,BO,BI) ppc_bcctrl(c,BO,BI)
+
+#define ppc_bclrx(c,BO,BI,BH,LK) ppc_emit32(c, (19 << 26) | ((BO) << 21 )| ((BI) << 16) | (0 << 13) | ((BH) << 11) | (16 << 1) | (LK))
+#define ppc_bclr(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,0)
+#define ppc_bclrl(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,1)
+
+#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI,0)
+#define ppc_bnelrlp(c,BO,BI) ppc_bclrl(c,BO,BI,0)
+
+#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (0 << 1) | 0)
+#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (32 << 1) | 0)
+#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpw(c,cfrD,A,B) ppc_cmp(c, (cfrD), 0, (A), (B))
+
+#define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc)
+#define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0)
+#define ppc_cntlzwd(c,S,A) ppc_cntlzwx(c,S,A,1)
+
+#define ppc_crand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (257 << 1) | 0)
+#define ppc_crandc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (129 << 1) | 0)
+#define ppc_creqv(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (289 << 1) | 0)
+#define ppc_crnand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (225 << 1) | 0)
+#define ppc_crnor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (33 << 1) | 0)
+#define ppc_cror(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (449 << 1) | 0)
+#define ppc_crorc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (417 << 1) | 0)
+#define ppc_crxor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (193 << 1) | 0)
+
+#define ppc_dcba(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (758 << 1) | 0)
+#define ppc_dcbf(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (86 << 1) | 0)
+#define ppc_dcbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (470 << 1) | 0)
+#define ppc_dcbst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (54 << 1) | 0)
+#define ppc_dcbt(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (278 << 1) | 0)
+#define ppc_dcbtst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (246 << 1) | 0)
+#define ppc_dcbz(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (1014 << 1) | 0)
+
+#define ppc_divwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (491 << 1) | Rc)
+#define ppc_divw(c,D,A,B) ppc_divwx(c,D,A,B,0,0)
+#define ppc_divwd(c,D,A,B) ppc_divwx(c,D,A,B,0,1)
+#define ppc_divwo(c,D,A,B) ppc_divwx(c,D,A,B,1,0)
+#define ppc_divwod(c,D,A,B) ppc_divwx(c,D,A,B,1,1)
+
+#define ppc_divwux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (459 << 1) | Rc)
+#define ppc_divwu(c,D,A,B) ppc_divwux(c,D,A,B,0,0)
+#define ppc_divwud(c,D,A,B) ppc_divwux(c,D,A,B,0,1)
+#define ppc_divwuo(c,D,A,B) ppc_divwux(c,D,A,B,1,0)
+#define ppc_divwuod(c,D,A,B) ppc_divwux(c,D,A,B,1,1)
+
+#define ppc_eciwx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (310 << 1) | 0)
+#define ppc_ecowx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (438 << 1) | 0)
+#define ppc_eieio(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (854 << 1) | 0)
+
+#define ppc_eqvx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (284 << 1) | Rc)
+#define ppc_eqv(c,A,S,B) ppc_eqvx(c,A,S,B,0)
+#define ppc_eqvd(c,A,S,B) ppc_eqvx(c,A,S,B,1)
+
+#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (954 << 1) | Rc)
+#define ppc_extsb(c,A,S) ppc_extsbx(c,A,S,0)
+#define ppc_extsbd(c,A,S) ppc_extsbx(c,A,S,1)
+
+#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (922 << 1) | Rc)
+#define ppc_extsh(c,A,S) ppc_extshx(c,A,S,0)
+#define ppc_extshd(c,A,S) ppc_extshx(c,A,S,1)
+
+#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (264 << 1) | Rc)
+#define ppc_fabs(c,D,B) ppc_fabsx(c,D,B,0)
+#define ppc_fabsd(c,D,B) ppc_fabsx(c,D,B,1)
+
+#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadd(c,D,A,B) ppc_faddx(c,D,A,B,0)
+#define ppc_faddd(c,D,A,B) ppc_faddx(c,D,A,B,1)
+
+#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadds(c,D,A,B) ppc_faddsx(c,D,A,B,0)
+#define ppc_faddsd(c,D,A,B) ppc_faddsx(c,D,A,B,1)
+
+#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (32 << 1) | 0)
+#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (0 << 1) | 0)
+
+#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (14 << 1) | Rc)
+#define ppc_fctiw(c,D,B) ppc_fctiwx(c,D,B,0)
+#define ppc_fctiwd(c,D,B) ppc_fctiwx(c,D,B,1)
+
+#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (15 << 1) | Rc)
+#define ppc_fctiwz(c,D,B) ppc_fctiwzx(c,D,B,0)
+#define ppc_fctiwzd(c,D,B) ppc_fctiwzx(c,D,B,1)
+
+#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdiv(c,D,A,B) ppc_fdivx(c,D,A,B,0)
+#define ppc_fdivd(c,D,A,B) ppc_fdivx(c,D,A,B,1)
+
+#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdivs(c,D,A,B) ppc_fdivsx(c,D,A,B,0)
+#define ppc_fdivsd(c,D,A,B) ppc_fdivsx(c,D,A,B,1)
+
+#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,0)
+#define ppc_fmaddd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,1)
+
+#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadds(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,0)
+#define ppc_fmaddsd(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,1)
+
+#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (72 << 1) | Rc)
+#define ppc_fmr(c,D,B) ppc_fmrx(c,D,B,0)
+#define ppc_fmrd(c,D,B) ppc_fmrx(c,D,B,1)
+
+#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsub(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,0)
+#define ppc_fmsubd(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,1)
+
+#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsubs(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,0)
+#define ppc_fmsubsd(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,1)
+
+#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmul(c,D,A,C) ppc_fmulx(c,D,A,C,0)
+#define ppc_fmuld(c,D,A,C) ppc_fmulx(c,D,A,C,1)
+
+#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmuls(c,D,A,C) ppc_fmulsx(c,D,A,C,0)
+#define ppc_fmulsd(c,D,A,C) ppc_fmulsx(c,D,A,C,1)
+
+#define ppc_fnabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (136 << 1) | Rc)
+#define ppc_fnabs(c,D,B) ppc_fnabsx(c,D,B,0)
+#define ppc_fnabsd(c,D,B) ppc_fnabsx(c,D,B,1)
+
+#define ppc_fnegx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (40 << 1) | Rc)
+#define ppc_fneg(c,D,B) ppc_fnegx(c,D,B,0)
+#define ppc_fnegd(c,D,B) ppc_fnegx(c,D,B,1)
+
+#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,0)
+#define ppc_fnmaddd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,1)
+
+#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadds(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,0)
+#define ppc_fnmaddsd(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,1)
+
+#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsub(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,0)
+#define ppc_fnmsubd(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,1)
+
+#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsubs(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,0)
+#define ppc_fnmsubsd(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,1)
+
+#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (24 << 1) | Rc)
+#define ppc_fres(c,D,B) ppc_fresx(c,D,B,0)
+#define ppc_fresd(c,D,B) ppc_fresx(c,D,B,1)
+
+#define ppc_frspx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (12 << 1) | Rc)
+#define ppc_frsp(c,D,B) ppc_frspx(c,D,B,0)
+#define ppc_frspd(c,D,B) ppc_frspx(c,D,B,1)
+
+#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (26 << 1) | Rc)
+#define ppc_frsqrte(c,D,B) ppc_frsqrtex(c,D,B,0)
+#define ppc_frsqrted(c,D,B) ppc_frsqrtex(c,D,B,1)
+
+#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (23 << 1) | Rc)
+#define ppc_fsel(c,D,A,C,B) ppc_fselx(c,D,A,C,B,0)
+#define ppc_fseld(c,D,A,C,B) ppc_fselx(c,D,A,C,B,1)
+
+#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrt(c,D,B) ppc_fsqrtx(c,D,B,0)
+#define ppc_fsqrtd(c,D,B) ppc_fsqrtx(c,D,B,1)
+
+#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrts(c,D,B) ppc_fsqrtsx(c,D,B,0)
+#define ppc_fsqrtsd(c,D,B) ppc_fsqrtsx(c,D,B,1)
+
+#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsub(c,D,A,B) ppc_fsubx(c,D,A,B,0)
+#define ppc_fsubd(c,D,A,B) ppc_fsubx(c,D,A,B,1)
+
+#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsubs(c,D,A,B) ppc_fsubsx(c,D,A,B,0)
+#define ppc_fsubsd(c,D,A,B) ppc_fsubsx(c,D,A,B,1)
+
+#define ppc_icbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (982 << 1) | 0)
+
+#define ppc_isync(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (150 << 1) | 0)
+
+#define ppc_lbzu(c,D,d,A) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lbzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (119 << 1) | 0)
+#define ppc_lbzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (87 << 1) | 0)
+
+#define ppc_lfdu(c,D,d,A) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfdux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (631 << 1) | 0)
+#define ppc_lfdx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (599 << 1) | 0)
+
+#define ppc_lfsu(c,D,d,A) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfsux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (567 << 1) | 0)
+#define ppc_lfsx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (535 << 1) | 0)
+
+#define ppc_lha(c,D,d,A) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhau(c,D,d,A) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhaux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (375 << 1) | 0)
+#define ppc_lhax(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (343 << 1) | 0)
+#define ppc_lhbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (790 << 1) | 0)
+#define ppc_lhzu(c,D,d,A) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lhzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (311 << 1) | 0)
+#define ppc_lhzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (279 << 1) | 0)
+
+#define ppc_lmw(c,D,d,A) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lswi(c,D,A,NB) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (NB << 11) | (597 << 1) | 0)
+#define ppc_lswx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (533 << 1) | 0)
+#define ppc_lwarx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (20 << 1) | 0)
+#define ppc_lwbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (534 << 1) | 0)
+
+#define ppc_lwzu(c,D,d,A) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lwzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (55 << 1) | 0)
+#define ppc_lwzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (23 << 1) | 0)
+
+#define ppc_mcrf(c,crfD,crfS) ppc_emit32(c, (19 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | 0)
+#define ppc_mcrfs(c,crfD,crfS) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | (0 << 16) | (64 << 1) | 0)
+#define ppc_mcrxr(c,crfD) ppc_emit32(c, (31 << 26) | (crfD << 23) | (0 << 16) | (512 << 1) | 0)
+
+#define ppc_mfcr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (19 << 1) | 0)
+#define ppc_mffsx(c,D,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (583 << 1) | Rc)
+#define ppc_mffs(c,D) ppc_mffsx(c,D,0)
+#define ppc_mffsd(c,D) ppc_mffsx(c,D,1)
+#define ppc_mfmsr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (83 << 1) | 0)
+#define ppc_mfsr(c,D,SR) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (595 << 1) | 0)
+#define ppc_mfsrin(c,D,B) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (B << 11) | (659 << 1) | 0)
+#define ppc_mftb(c,D,TBR) ppc_emit32(c, (31 << 26) | (D << 21) | (TBR << 11) | (371 << 1) | 0)
+
+#define ppc_mtcrf(c,CRM,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (CRM << 12) | (0 << 11) | (144 << 1) | 0)
+
+#define ppc_mtfsb0x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (70 << 1) | Rc)
+#define ppc_mtfsb0(c,CRB) ppc_mtfsb0x(c,CRB,0)
+#define ppc_mtfsb0d(c,CRB) ppc_mtfsb0x(c,CRB,1)
+
+#define ppc_mtfsb1x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (38 << 1) | Rc)
+#define ppc_mtfsb1(c,CRB) ppc_mtfsb1x(c,CRB,0)
+#define ppc_mtfsb1d(c,CRB) ppc_mtfsb1x(c,CRB,1)
+
+#define ppc_mtfsfx(c,FM,B,Rc) ppc_emit32(c, (63 << 26) | (0 << 25) | (FM << 22) | (0 << 21) | (B << 11) | (711 << 1) | Rc)
+#define ppc_mtfsf(c,FM,B) ppc_mtfsfx(c,FM,B,0)
+#define ppc_mtfsfd(c,FM,B) ppc_mtfsfx(c,FM,B,1)
+
+#define ppc_mtfsfix(c,crfD,IMM,Rc) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 16) | (IMM << 12) | (0 << 11) | (134 << 1) | Rc)
+#define ppc_mtfsfi(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,0)
+#define ppc_mtfsfid(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,1)
+
+#define ppc_mtmsr(c, S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 11) | (146 << 1) | 0)
+
+#define ppc_mtsr(c,SR,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (210 << 1) | 0)
+#define ppc_mtsrin(c,S,B) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 16) | (B << 11) | (242 << 1) | 0)
+
+#define ppc_mulhwx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (75 << 1) | Rc)
+#define ppc_mulhw(c,D,A,B) ppc_mulhwx(c,D,A,B,0)
+#define ppc_mulhwd(c,D,A,B) ppc_mulhwx(c,D,A,B,1)
+
+#define ppc_mulhwux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (11 << 1) | Rc)
+#define ppc_mulhwu(c,D,A,B) ppc_mulhwux(c,D,A,B,0)
+#define ppc_mulhwud(c,D,A,B) ppc_mulhwux(c,D,A,B,1)
+
+#define ppc_mulli(c,D,A,SIMM)        ppc_emit32(c, (7 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_mullwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (235 << 1) | Rc)
+#define ppc_mullw(c,D,A,B) ppc_mullwx(c,D,A,B,0,0)
+#define ppc_mullwd(c,D,A,B) ppc_mullwx(c,D,A,B,0,1)
+#define ppc_mullwo(c,D,A,B) ppc_mullwx(c,D,A,B,1,0)
+#define ppc_mullwod(c,D,A,B) ppc_mullwx(c,D,A,B,1,1)
+
+#define ppc_nandx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (476 << 1) | Rc)
+#define ppc_nand(c,A,S,B) ppc_nandx(c,A,S,B,0)
+#define ppc_nandd(c,A,S,B) ppc_nandx(c,A,S,B,1)
+
+#define ppc_negx(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (104 << 1) | Rc)
+#define ppc_neg(c,D,A) ppc_negx(c,D,A,0,0)
+#define ppc_negd(c,D,A) ppc_negx(c,D,A,0,1)
+#define ppc_nego(c,D,A) ppc_negx(c,D,A,1,0)
+#define ppc_negod(c,D,A) ppc_negx(c,D,A,1,1)
+
+#define ppc_norx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (124 << 1) | Rc)
+#define ppc_nor(c,A,S,B) ppc_norx(c,A,S,B,0)
+#define ppc_nord(c,A,S,B) ppc_norx(c,A,S,B,1)
+
+#define ppc_not(c,A,S) ppc_norx(c,A,S,S,0)
+
+#define ppc_orx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (444 << 1) | Rc)
+#define ppc_ord(c,A,S,B) ppc_orx(c,A,S,B,1)
+
+#define ppc_orcx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (412 << 1) | Rc)
+#define ppc_orc(c,A,S,B) ppc_orcx(c,A,S,B,0)
+#define ppc_orcd(c,A,S,B) ppc_orcx(c,A,S,B,1)
+
+#define ppc_oris(c,A,S,UIMM) ppc_emit32(c, (25 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+#define ppc_rfi(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (50 << 1) | 0)
+
+#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0)
+#define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1)
+
+#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | ((S) << 21) | ((A) << 16) | ((SH) << 11) | ((MB) << 6) | ((ME) << 1) | (Rc))
+#define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1)
+#define ppc_extlwi(c,A,S,n,b) ppc_rlwinm(c,A,S, b, 0, (n) - 1)
+#define ppc_extrwi(c,A,S,n,b) ppc_rlwinm(c,A,S, (b) + (n), 32 - (n), 31)
+#define ppc_rotlwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31)
+#define ppc_rotrwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), 0, 31)
+#define ppc_slwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31 - (n))
+#define ppc_srwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), n, 31)
+#define ppc_clrlwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, n, 31)
+#define ppc_clrrwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, 0, 31 - (n))
+#define ppc_clrlslwi(c,A,S,b,n) ppc_rlwinm(c,A,S, n, (b) - (n), 31 - (n))
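+
+/* Illustrative sketch (an addition, not in the original header): the
+   extended mnemonics above are plain rlwinm encodings.  With a
+   hypothetical code buffer p, a left shift by 8 can be written either
+   way and produces the identical instruction word:
+
+	ppc_slwi (p, ppc_r3, ppc_r4, 8);		// rlwinm r3,r4,8,0,23
+	ppc_rlwinm (p, ppc_r3, ppc_r4, 8, 0, 23);	// same encoding
+
+   ppc_r3/ppc_r4 are assumed to be the register constants defined
+   earlier in this header. */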
+
+#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwnmd(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,1)
+
+#define ppc_sc(c) ppc_emit32(c, (17 << 26) | (0 << 2) | (1 << 1) | 0)
+
+#define ppc_slwx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (24 << 1) | Rc)
+#define ppc_slw(c,S,A,B) ppc_slwx(c,S,A,B,0)
+#define ppc_slwd(c,S,A,B) ppc_slwx(c,S,A,B,1)
+
+#define ppc_srawx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (792 << 1) | Rc)
+#define ppc_sraw(c,A,S,B) ppc_srawx(c,A,S,B,0)
+#define ppc_srawd(c,A,S,B) ppc_srawx(c,A,S,B,1)
+
+#define ppc_srawix(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (824 << 1) | Rc)
+#define ppc_srawi(c,A,S,SH) ppc_srawix(c,A,S,SH,0)
+#define ppc_srawid(c,A,S,SH) ppc_srawix(c,A,S,SH,1)
+
+#define ppc_srwx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (536 << 1) | Rc)
+#define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0)
+#define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1)
+
+#define ppc_stbu(c,S,d,A) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0)
+#define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0)
+
+#define ppc_stfdu(c,S,d,A) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0)
+#define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0)
+
+#define ppc_stfsu(c,S,d,A) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0)
+#define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0)
+#define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0)
+#define ppc_sthu(c,S,d,A) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0)
+#define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0)
+#define ppc_stmw(c,S,d,A) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)d)
+#define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0)
+#define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0)
+#define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0)
+#define ppc_stwcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (150 << 1) | 1)
+#define ppc_stwux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (183 << 1) | 0)
+#define ppc_stwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (151 << 1) | 0)
+
+#define ppc_subfx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (40 << 1) | Rc)
+#define ppc_subf(c,D,A,B) ppc_subfx(c,D,A,B,0,0)
+#define ppc_subfd(c,D,A,B) ppc_subfx(c,D,A,B,0,1)
+#define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0)
+#define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1)
+
+#define ppc_sub(c,D,A,B) ppc_subf(c,D,B,A)
+
+#define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc)
+#define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0)
+#define ppc_subfcd(c,D,A,B) ppc_subfcx(c,D,A,B,0,1)
+#define ppc_subfco(c,D,A,B) ppc_subfcx(c,D,A,B,1,0)
+#define ppc_subfcod(c,D,A,B) ppc_subfcx(c,D,A,B,1,1)
+
+#define ppc_subfex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (136 << 1) | Rc)
+#define ppc_subfe(c,D,A,B) ppc_subfex(c,D,A,B,0,0)
+#define ppc_subfed(c,D,A,B) ppc_subfex(c,D,A,B,0,1)
+#define ppc_subfeo(c,D,A,B) ppc_subfex(c,D,A,B,1,0)
+#define ppc_subfeod(c,D,A,B) ppc_subfex(c,D,A,B,1,1)
+
+#define ppc_subfic(c,D,A,SIMM) ppc_emit32(c, (8 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_subfmex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (232 << 1) | Rc)
+#define ppc_subfme(c,D,A) ppc_subfmex(c,D,A,0,0)
+#define ppc_subfmed(c,D,A) ppc_subfmex(c,D,A,0,1)
+#define ppc_subfmeo(c,D,A) ppc_subfmex(c,D,A,1,0)
+#define ppc_subfmeod(c,D,A) ppc_subfmex(c,D,A,1,1)
+
+#define ppc_subfzex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (200 << 1) | Rc)
+#define ppc_subfze(c,D,A) ppc_subfzex(c,D,A,0,0)
+#define ppc_subfzed(c,D,A) ppc_subfzex(c,D,A,0,1)
+#define ppc_subfzeo(c,D,A) ppc_subfzex(c,D,A,1,0)
+#define ppc_subfzeod(c,D,A) ppc_subfzex(c,D,A,1,1)
+
+#define ppc_sync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (598 << 1) | 0)
+#define ppc_tlbia(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (370 << 1) | 0)
+#define ppc_tlbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 16) | (B << 11) | (306 << 1) | 0)
+#define ppc_tlbsync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (566 << 1) | 0)
+
+#define ppc_tw(c,TO,A,B) ppc_emit32(c, (31 << 26) | (TO << 21) | (A << 16) | (B << 11) | (4 << 1) | 0)
+#define ppc_twi(c,TO,A,SIMM) ppc_emit32(c, (3 << 26) | (TO << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_xorx(c,A,S,B,RC) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (316 << 1) | RC)
+#define ppc_xor(c,A,S,B) ppc_xorx(c,A,S,B,0)
+#define ppc_xord(c,A,S,B) ppc_xorx(c,A,S,B,1)
+
+#define ppc_xori(c,S,A,UIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+#define ppc_xoris(c,S,A,UIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+/* this marks the end of my work, ct */
+
+/* PPC64 */
+
+/* The following FP instructions are not available to 32-bit
+   implementations (prior to PowerISA-V2.01) but are available to
+   32-bit mode programs on 64-bit PowerPC implementations and all
+   processors compliant with PowerISA-2.01 or later. */
+
+#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc))
+#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0)
+#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1)
+
+#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc))
+#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0)
+#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1)
+
+#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc))
+#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0)
+#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1)
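+
+/* Illustrative sketch (an addition, not in the original header): a
+   typical use of fctidz is a truncating double -> 64-bit integer
+   conversion through memory.  p is a hypothetical code buffer, and
+   the register constants and ppc_stfd are assumed to be defined
+   earlier in this header:
+
+	ppc_fctidz (p, ppc_f0, ppc_f1);		// f0 = trunc(f1) as int64
+	ppc_stfd (p, ppc_f0, -8, ppc_sp);	// spill the 64-bit pattern
+	ppc_ld (p, ppc_r3, -8, ppc_sp);		// reload into a GPR (64-bit)
+
+   On 32-bit targets the reload would instead be two lwz instructions. */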
+
+#ifdef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \
+ ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define PPC_LOAD_SEQUENCE_LENGTH 20
+
+#define ppc_is_imm32(val) (((((gint64)val)>> 31) == 0) || ((((gint64)val)>> 31) == -1))
+#define ppc_is_imm48(val) (((((gint64)val)>> 47) == 0) || ((((gint64)val)>> 47) == -1))
+
+#define ppc_load48(c,D,v) G_STMT_START { \
+ ppc_li ((c), (D), ((gint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint64)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint64)(v)); \
+ } else if (ppc_is_imm32 ((guint64)(v))) { \
+ ppc_load32 ((c), (D), (guint32)(guint64)(v)); \
+ } else if (ppc_is_imm48 ((guint64)(v))) { \
+ ppc_load48 ((c), (D), (guint64)(v)); \
+ } else { \
+ ppc_load_sequence ((c), (D), (guint64)(v)); \
+ } \
+ } G_STMT_END
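+
+/* Illustrative sketch (an addition, not in the original header):
+   ppc_load emits the shortest sequence that reaches the constant.
+   With a hypothetical code buffer p:
+
+	ppc_load (p, ppc_r3, 0x12);			// 1 insn:  li
+	ppc_load (p, ppc_r3, 0x12345678);		// 2 insns: lis+ori
+	ppc_load (p, ppc_r3, 0x123456789abcULL);	// 4 insns: 48-bit path
+	ppc_load (p, ppc_r3, 0x123456789abcdef0ULL);	// 5 insns: full sequence
+
+   ppc_is_imm16 and ppc_load32 are assumed to be defined earlier in
+   this header. */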
+
+#define ppc_load_func(c,D,v) G_STMT_START { \
+ ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \
+ ppc_ldptr ((c), ppc_r2, sizeof (gpointer), ppc_r11); \
+ ppc_ldptr ((c), (D), 0, ppc_r11); \
+ } G_STMT_END
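+
+/* Note (an addition for clarity; this assumes the ELFv1 function-
+   descriptor ABI): the two loads above dereference a descriptor --
+   the entry point at offset 0 goes into D and the callee's TOC
+   pointer at offset sizeof (gpointer) goes into r2, as that ABI
+   requires. */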
+
+#define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (D); __i <= 31; ++__i) { \
+ ppc_ldr ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_store_multiple_regs(c,S,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (S); __i <= 31; ++__i) { \
+ ppc_str ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_compare(c,crfD,A,B) ppc_cmp((c), (crfD), 1, (A), (B))
+#define ppc_compare_reg_imm(c,crfD,A,B) ppc_cmpi((c), (crfD), 1, (A), (B))
+#define ppc_compare_log(c,crfD,A,B) ppc_cmpl((c), (crfD), 1, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_sld((c), (A), (S), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_sldi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srdi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_sradi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mulld((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrdi((c), (A), (S), (n))
+
+#define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc))
+#define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0)
+#define ppc_divdd(c,D,A,B) ppc_divdx(c,D,A,B,0,1)
+#define ppc_divdo(c,D,A,B) ppc_divdx(c,D,A,B,1,0)
+#define ppc_divdod(c,D,A,B) ppc_divdx(c,D,A,B,1,1)
+
+#define ppc_divdux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (457 << 1) | (Rc))
+#define ppc_divdu(c,D,A,B) ppc_divdux(c,D,A,B,0,0)
+#define ppc_divdud(c,D,A,B) ppc_divdux(c,D,A,B,0,1)
+#define ppc_divduo(c,D,A,B) ppc_divdux(c,D,A,B,1,0)
+#define ppc_divduod(c,D,A,B) ppc_divdux(c,D,A,B,1,1)
+
+#define ppc_extswx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (0 << 11) | (986 << 1) | (Rc))
+#define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0)
+#define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1)
+
+/* These move float to/from instructions are only available on POWER6
+   in native mode.  These instructions are faster than the equivalent
+   store/load sequence because they avoid the store queue and its
+   associated delays.  They should only be used in 64-bit mode unless
+   the kernel preserves the 64-bit GPRs across signal delivery and
+   dispatch in 32-bit mode.  The Linux kernel does not. */
+#define ppc_mftgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (735 << 1) | 0)
+#define ppc_mffgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (607 << 1) | 0)
+
+#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_lwa(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((ds) & 0xfffc) | 2)
+#define ppc_ldarx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (84 << 1) | 0)
+#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_ldux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (53 << 1) | 0)
+#define ppc_lwaux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (373 << 1) | 0)
+#define ppc_ldx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (21 << 1) | 0)
+#define ppc_lwax(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (341 << 1) | 0)
+
+#define ppc_mulhdx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (73 << 1) | (Rc))
+#define ppc_mulhd(c,D,A,B) ppc_mulhdx(c,D,A,B,0)
+#define ppc_mulhdd(c,D,A,B) ppc_mulhdx(c,D,A,B,1)
+#define ppc_mulhdux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (9 << 1) | (Rc))
+#define ppc_mulhdu(c,D,A,B) ppc_mulhdux(c,D,A,B,0)
+#define ppc_mulhdud(c,D,A,B) ppc_mulhdux(c,D,A,B,1)
+
+#define ppc_mulldx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (233 << 1) | (Rc))
+#define ppc_mulld(c,D,A,B) ppc_mulldx(c,D,A,B,0,0)
+#define ppc_mulldd(c,D,A,B) ppc_mulldx(c,D,A,B,0,1)
+#define ppc_mulldo(c,D,A,B) ppc_mulldx(c,D,A,B,1,0)
+#define ppc_mulldod(c,D,A,B) ppc_mulldx(c,D,A,B,1,1)
+
+#define ppc_rldclx(c,A,S,B,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(MB) << 5) | (8 << 1) | (Rc))
+#define ppc_rldcl(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,0)
+#define ppc_rldcld(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,1)
+#define ppc_rotld(c,A,S,B) ppc_rldcl(c, A, S, B, 0)
+
+#define ppc_rldcrx(c,A,S,B,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(ME) << 5) | (9 << 1) | (Rc))
+#define ppc_rldcr(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,0)
+#define ppc_rldcrd(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,1)
+
+#define ppc_rldicx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (2 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldic(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,0)
+#define ppc_rldicd(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,1)
+
+#define ppc_rldiclx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (0 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicl(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,0)
+#define ppc_rldicld(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,1)
+#define ppc_extrdi(c,A,S,n,b) ppc_rldicl(c,A,S, (b) + (n), 64 - (n))
+#define ppc_rotldi(c,A,S,n) ppc_rldicl(c,A,S, n, 0)
+#define ppc_rotrdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), 0)
+#define ppc_srdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), n)
+#define ppc_clrldi(c,A,S,n) ppc_rldicl(c,A,S, 0, n)
+
+#define ppc_rldicrx(c,A,S,SH,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(ME) << 5) | (1 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicr(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,0)
+#define ppc_rldicrd(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,1)
+#define ppc_extldi(c,A,S,n,b) ppc_rldicr(c, A, S, b, (n) - 1)
+#define ppc_sldi(c,A,S,n) ppc_rldicr(c, A, S, n, 63 - (n))
+#define ppc_clrrdi(c,A,S,n) ppc_rldicr(c, A, S, 0, 63 - (n))
+
+#define ppc_rldimix(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (3 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldimi(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,0)
+#define ppc_rldimid(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,1)
+
+#define ppc_slbia(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (498 << 1) | 0)
+#define ppc_slbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | ((B) << 11) | (434 << 1) | 0)
+#define ppc_sldx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (27 << 1) | (Rc))
+#define ppc_sld(c,A,S,B) ppc_sldx(c,S,A,B,0)
+#define ppc_sldd(c,A,S,B) ppc_sldx(c,S,A,B,1)
+
+#define ppc_sradx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (794 << 1) | (Rc))
+#define ppc_srad(c,A,S,B) ppc_sradx(c,S,A,B,0)
+#define ppc_sradd(c,A,S,B) ppc_sradx(c,S,A,B,1)
+#define ppc_sradix(c,S,A,SH,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (((SH) & 31) << 11) | (413 << 2) | (((SH) >> 5) << 1) | (Rc))
+#define ppc_sradi(c,A,S,SH) ppc_sradix(c,S,A,SH,0)
+#define ppc_sradid(c,A,S,SH) ppc_sradix(c,S,A,SH,1)
+
+#define ppc_srdx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (539 << 1) | (Rc))
+#define ppc_srd(c,A,S,B) ppc_srdx(c,S,A,B,0)
+#define ppc_srdd(c,A,S,B) ppc_srdx(c,S,A,B,1)
+
+#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_stdcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (214 << 1) | 1)
+#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0)
+#define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0)
+
+#else
+/* Always true for 32-bit */
+#define ppc_is_imm32(val) (1)
+#endif
+
+#endif
diff --git a/src/arch/s390x/.gitignore b/src/arch/s390x/.gitignore
new file mode 100644
index 0000000..341daec
--- /dev/null
+++ b/src/arch/s390x/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
diff --git a/src/arch/s390x/ChangeLog b/src/arch/s390x/ChangeLog
new file mode 100644
index 0000000..e756d35
--- /dev/null
+++ b/src/arch/s390x/ChangeLog
@@ -0,0 +1,35 @@
+2010-03-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Remove duplicate
+
+2009-06-24 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2007-04-12 Neale Ferguson <neale@sinenomine.net>
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+2007-01-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+2006-03-13 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Fix immediate checks.
+
+2006-01-06 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+2006-01-03 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2004-12-15 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h: Add some new instructions (CS, CSG, CSY, CDS, CDSG, CDSY)
+
+2004-08-03 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h Makefile.am tramp.c: S/390 64-bit interpreter
diff --git a/src/arch/s390x/Makefile.am b/src/arch/s390x/Makefile.am
new file mode 100644
index 0000000..ce7f470
--- /dev/null
+++ b/src/arch/s390x/Makefile.am
@@ -0,0 +1,7 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-s390x.la
+
+libmonoarch_s390x_la_SOURCES = tramp.c s390x-codegen.h
+
diff --git a/src/arch/s390x/s390x-codegen.h b/src/arch/s390x/s390x-codegen.h
new file mode 100644
index 0000000..47e6564
--- /dev/null
+++ b/src/arch/s390x/s390x-codegen.h
@@ -0,0 +1,997 @@
+/*
+ Copyright (C) 2001 Radek Doulik
+*/
+
+#ifndef S390X_H
+#define S390X_H
+#include <glib.h>
+#include <assert.h>
+#include <limits.h>
+
+#define FLOAT_REGS 2 /* No. float registers for parms */
+#define GENERAL_REGS 5 /* No. general registers for parms */
+
+#define ARG_BASE s390_r10 /* Register for addressing arguments */
+#define STKARG \
+ (i*(sizeof(stackval))) /* Displacement of ith argument */
+
+#define MINV_POS 160 /* MonoInvocation stack offset */
+#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count)
+#define OBJ_POS 8
+#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type))
+
+#define MIN_CACHE_LINE 256
+
+/*------------------------------------------------------------------*/
+/* Sequence to add an int/long long to parameters to stack_from_data*/
+/*------------------------------------------------------------------*/
+#define ADD_ISTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a float/double to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_RSTACK_PARM(i) \
+ if (fpr_param < FLOAT_REGS) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ float_pos + (fpr_param * sizeof(float) * (i))); \
+ fpr_param++; \
+ } else { \
+ stack_param += (stack_param % (i)); \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a structure ptr to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_TSTACK_PARM \
+ if (reg_param < GENERAL_REGS) { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param++; \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
+
+#define ADD_PSTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
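+
+/*------------------------------------------------------------------*/
+/* Note (added for clarity): the macros above assume the enclosing  */
+/* trampoline generator supplies p (code buffer), reg_param,        */
+/* fpr_param, stack_param, this_flag, local_start, float_pos and    */
+/* sz; each computes the address of the next argument slot into r4  */
+/* before it is passed to stack_from_data.                          */
+/*------------------------------------------------------------------*/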
+
+typedef enum {
+ s390_r0 = 0,
+ s390_r1,
+ s390_r2,
+ s390_r3,
+ s390_r4,
+ s390_r5,
+ s390_r6,
+ s390_r7,
+ s390_r8,
+ s390_r9,
+ s390_r10,
+ s390_r11,
+ s390_r12,
+ s390_r13,
+ s390_r14,
+ s390_r15,
+} S390IntRegister;
+
+typedef enum {
+ s390_f0 = 0,
+ s390_f1,
+ s390_f2,
+ s390_f3,
+ s390_f4,
+ s390_f5,
+ s390_f6,
+ s390_f7,
+ s390_f8,
+ s390_f9,
+ s390_f10,
+ s390_f11,
+ s390_f12,
+ s390_f13,
+ s390_f14,
+ s390_f15,
+} S390FloatRegister;
+
+typedef enum {
+ s390_a0 = 0,
+ s390_a1,
+ s390_a2,
+ s390_a3,
+ s390_a4,
+ s390_a5,
+ s390_a6,
+ s390_a7,
+ s390_a8,
+ s390_a9,
+ s390_a10,
+ s390_a11,
+ s390_a12,
+ s390_a13,
+ s390_a14,
+ s390_a15,
+} S390AccRegister;
+
+typedef enum {
+ s390_fpc = 256,
+} S390SpecialRegister;
+
+#define s390_is_imm16(val) ((glong)val >= (glong) SHRT_MIN && \
+ (glong)val <= (glong) SHRT_MAX)
+#define s390_is_imm32(val) ((glong)val >= (glong) INT_MIN && \
+ (glong)val <= (glong) INT_MAX)
+#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= (glong) USHRT_MAX)
+#define s390_is_uimm32(val) ((glong)val >= 0 && (glong)val <= (glong) UINT_MAX)
+#define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575)
+#define s390_is_imm20(val) ((glong)val >= -524288 && (glong)val <= 524287)
+#define s390_is_imm12(val) ((glong)val >= (glong)-4096 && \
+ (glong)val <= (glong)4095)
+#define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095)
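+
+/* Illustrative sketch (an addition, not in the original header):
+   these range checks let an emitter pick the shortest encoding for
+   an immediate.  A hypothetical helper built on the mnemonic macros
+   defined later in this header:
+
+	static void
+	emit_add_imm (guint8 **p, int reg, glong val)
+	{
+		if (s390_is_imm16 (val))
+			s390_aghi (*p, reg, val);	// 4-byte RI form
+		else if (s390_is_imm32 (val))
+			s390_agfi (*p, reg, val);	// 6-byte RIL form
+		// larger values need a literal pool or a register temp
+	}
+*/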
+
+#define STK_BASE s390_r15
+#define S390_SP s390_r15
+#define S390_FP s390_r11
+#define S390_MINIMAL_STACK_SIZE 160
+#define S390_REG_SAVE_OFFSET 48
+#define S390_PARM_SAVE_OFFSET 16
+#define S390_RET_ADDR_OFFSET 112
+#define S390_FLOAT_SAVE_OFFSET 128
+
+#define S390_CC_ZR 8
+#define S390_CC_EQ 8 /* alias: s390_je/s390_jge below expect this name */
+#define S390_CC_NE 7
+#define S390_CC_NZ 7
+#define S390_CC_LT 4
+#define S390_CC_GT 2
+#define S390_CC_GE 11
+#define S390_CC_NM 11
+#define S390_CC_LE 13
+#define S390_CC_OV 1
+#define S390_CC_NO 14
+#define S390_CC_CY 3
+#define S390_CC_NC 12
+#define S390_CC_UN 15
+
+#define s390_word(addr, value) do \
+{ \
+ * (guint32 *) addr = (guint32) value; \
+ addr += sizeof(guint32); \
+} while (0)
+
+#define s390_float(addr, value) do \
+{ \
+ * (gfloat *) addr = (gfloat) value; \
+ addr += sizeof(gfloat); \
+} while (0)
+
+#define s390_llong(addr, value) do \
+{ \
+ * (guint64 *) addr = (guint64) value; \
+ addr += sizeof(guint64); \
+} while (0)
+
+#define s390_double(addr, value) do \
+{ \
+ * (gdouble *) addr = (gdouble) value; \
+ addr += sizeof(gdouble); \
+} while (0)
+
+typedef struct {
+ short op;
+} E_Format;
+
+typedef struct {
+ char op;
+ int im;
+} I_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r2 : 4;
+} RR_Format;
+
+typedef struct {
+ short op;
+ char xx;
+ char r1 : 4;
+ char r2 : 4;
+} RRE_Format;
+
+typedef struct {
+ short op;
+ char r1 : 4;
+ char xx : 4;
+ char r3 : 4;
+ char r2 : 4;
+} RRF_Format_1;
+
+typedef struct {
+ short op;
+ char m3 : 4;
+ char xx : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_2;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char m4 : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ short d2 : 12;
+} RX_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char xx;
+ char op2;
+} RXE_Format;
+
+typedef struct {
+ char op1;
+ char r3 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char r1 : 4;
+ char xx : 4;
+ char op2;
+} RXF_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RXY_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_1;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char xx : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_2;
+
+typedef struct {
+ char op1;
+ char l1 : 4;
+ char xx : 4;
+ char b1 : 4;
+ int d1 : 12;
+ char yy;
+ char op2;
+} RSL_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+} RSI_Format;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ short i2;
+} RI_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char xx;
+ char op2;
+} RIE_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char m2 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_2;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short d;
+ char i;
+ char op2;
+} RIE_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char yy : 4;
+ short i2;
+ char m3 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_4;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_1;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_2;
+
+typedef struct {
+ char op;
+ char i2;
+ char b1 : 4;
+ short d1 : 12;
+} SI_Format;
+
+typedef struct {
+ char op1;
+ char i2;
+ char b1 : 4;
+ int d1 : 20;
+ char op2;
+} __attribute__ ((packed)) SIY_Format;
+
+typedef struct {
+ short op;
+ char b2 : 4;
+ short d2 : 12;
+} S_Format;
+
+typedef struct {
+ char op;
+ char ll;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_1;
+
+typedef struct {
+ char op;
+ char l1 : 4;
+ char l2 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ short d2 : 12;
+ char b4 : 4;
+ short d4 : 12;
+} SS_Format_4;
+
+typedef struct {
+ short op;
+ short tb1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSE_Format;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char o2 : 4;
+ short b1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSF_Format;
+
+#define s390_emit16(c, x) do \
+{ \
+ *((guint16 *) c) = (guint16) x; \
+ c += sizeof(guint16); \
+} while(0)
+
+#define s390_emit32(c, x) do \
+{ \
+ *((guint32 *) c) = (guint32) x; \
+ c += sizeof(guint32); \
+} while(0)
+
+#define S390_E(c,opc) s390_emit16(c,opc)
+
+#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm))
+
+#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2))
+
+#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2))
+
+#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2))
+
+#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2))
+
+#define S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2))
+
+#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RXE(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RXY(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
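+
+/* Note (added for clarity): RXY instructions carry a signed 20-bit
+   displacement; the low 12 bits go into the classic D field and the
+   high 8 bits into the DH byte, which is what the shifts above
+   assemble. */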
+
+#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSL(c,opc,ln,s1,p1) do			\
+{							\
+	s390_emit16(c, ((opc & 0xff00) | (ln) << 4));	\
+	s390_emit32(c, ((s1) << 28 | (((p1) & 0xfff) << 16) | \
+		       (opc & 0xff)));			\
+} while (0)
+
+#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff)))
+
+#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff)))
+
+#define S390_RIE_1(c,opc,g1,g3,m2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_2(c,opc,g1,g2,m3,v) do			\
+{							\
+	s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g2)); \
+	s390_emit16(c, (v));				\
+	s390_emit16(c, ((m3) << 12 | (opc & 0xff)));	\
+} while (0)
+
+#define S390_RIE_3(c,opc,g1,i,m3,d) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | m3)); \
+ s390_emit16(c, (d)); \
+ s390_emit16(c, ((i) << 8 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_4(c,opc,g1,i2,m3) do			\
+{							\
+	s390_emit16(c, ((opc & 0xff00) | (g1) << 4));	\
+	s390_emit16(c, (i2));				\
+	s390_emit16(c, ((m3) << 12 | (opc & 0xff)));	\
+} while (0)
+
+#define S390_RIL_1(c,opc,g1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIL_2(c,opc,k1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIS(c,opc,r,i,m3,b,d) do			\
+{							\
+	s390_emit16(c, (((opc) & 0xff00) | ((r) << 4) | (m3))); \
+	s390_emit16(c, ((b) << 12) | (d));		\
+	s390_emit16(c, (((i) & 0xff) << 8) | ((opc) & 0xff)); \
+} while (0)
+
+#define S390_RRS(c,opc,r1,r2,m3,b,d) do			\
+{							\
+	s390_emit16(c, (((opc) & 0xff00) | ((r1) << 4) | (r2))); \
+	s390_emit16(c, ((b) << 12) | (d));		\
+	s390_emit16(c, ((m3) << 12) | ((opc) & 0xff));	\
+} while (0)
+
+#define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff)));
+
+#define S390_SIY(c,opc,s1,p1,m2) do			\
+{							\
+	s390_emit16(c, ((opc & 0xff00) | m2));		\
+	s390_emit32(c, ((s1) << 24 | (((p1) & 0xfffff) << 8) | \
+		       (opc & 0xff)));			\
+} while (0)
+
+#define S390_S(c,opc,s2,p2) s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \
+{ \
+ s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \
+ (s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do		\
+{							\
+	s390_emit32(c, (opc << 24 | (n1) << 20 | (n2) << 16 | \
+		       (s1) << 12 | ((p1) & 0xfff)));	\
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff)));	\
+} while (0)
+
+#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do		\
+{							\
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+		       (s1) << 12 | ((p1) & 0xfff)));	\
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff)));	\
+} while (0)
+
+#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do		\
+{							\
+	s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | \
+		       (s2) << 12 | ((p2) & 0xfff)));	\
+	s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff)));	\
+} while (0)
+
+#define S390_SSE(c,opc,s1,p1,s2,p2) do \
+{ \
+ s390_emit16(c, opc); \
+ s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SSF(c,opc,r3,s1,p1,s2,p2) do		\
+{							\
+	s390_emit16(c, ((((opc) >> 4) << 8) | ((r3) << 4) | \
+		       ((opc) & 0xf)));			\
+	s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff)));	\
+	s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff)));	\
+} while (0)
+
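+/* Illustrative sketch (an addition, not in the original header): the
+   format macros above are wrapped by the mnemonic macros below, and
+   each advances the code pointer by the encoded length.  With a
+   hypothetical buffer:
+
+	guint8 *p = code;
+	s390_lgr (p, s390_r2, s390_r3);		// RRE, 4 bytes
+	s390_aghi (p, s390_r2, 8);		// RI,  4 bytes
+	s390_br (p, s390_r14);			// RR,  2 bytes (return)
+*/
+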
+#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d)
+#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d)
+#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2)
+#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2)
+#define s390_afi(c, r, v) S390_RIL_1(c, 0xc29, r, v)
+#define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d)
+#define s390_agf(c, r, x, b, d) S390_RXY(c, 0xe318, r, x, b, d)
+#define s390_agfi(c, r, v) S390_RIL_1(c, 0xc28, r, v)
+#define s390_agfr(c, r1, r2) S390_RRE(c, 0xb918, r1, r2)
+#define s390_aghi(c, r, v) S390_RI(c, 0xa7b, r, v)
+#define s390_aghik(c, r1, r3, v) S390_RIE_1(c, 0xecd9, r1, r3, v)
+#define s390_agr(c, r1, r2) S390_RRE(c, 0xb908, r1, r2)
+#define s390_agrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e8, r1, r2, r3)
+#define s390_agsi(c, b, d, v) S390_SIY(c, 0xeb7a, b, d, v)
+#define s390_ahhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9c8, r1, r2, r3)
+#define s390_ahhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9d8, r1, r2, r3)
+#define s390_ahi(c, r, v) S390_RI(c, 0xa7a, r, v)
+#define s390_ahik(c, r1, r3, v) S390_RIE_1(c, 0xecd8, r1, r3, v)
+#define s390_ahy(c, r, x, b, d) S390_RXY(c, 0xe37a, r, x, b, d)
+#define s390_aih(c, r, v) S390_RIL_1(c, 0xcc8, r, v)
+#define s390_al(c, r, x, b, d) S390_RX(c, 0x5e, r, x, b, d)
+#define s390_alc(c, r, x, b, d) S390_RXY(c, 0xe398, r, x, b, d)
+#define s390_alcg(c, r, x, b, d) S390_RXY(c, 0xe388, r, x, b, d)
+#define s390_alcgr(c, r1, r2) S390_RRE(c, 0xb988, r1, r2)
+#define s390_alcr(c, r1, r2) S390_RRE(c, 0xb998, r1, r2)
+#define s390_alfi(c, r, v) S390_RIL_1(c, 0xc2b, r, v)
+#define s390_alg(c, r, x, b, d) S390_RXY(c, 0xe30a, r, x, b, d)
+#define s390_algf(c, r, x, b, d) S390_RXY(c, 0xe31a, r, x, b, d)
+#define s390_algfi(c, r, v) S390_RIL_1(c, 0xc2a, r, v)
+#define s390_algfr(c, r1, r2) S390_RRE(c, 0xb91a, r1, r2)
+#define s390_alghsik(c, r1, r3, v) S390_RIE_1(c, 0xecdb, r1, r3, v)
+#define s390_algr(c, r1, r2) S390_RRE(c, 0xb90a, r1, r2)
+#define s390_algsi(c, b, d, v) S390_SIY(c, 0xeb7e, b, d, v)
+#define s390_alhhhr(c, r1, r2, r3) S390_RRF_1(c, 0xb9ca, r1, r2, r3)
+#define s390_alhhlr(c, r1, r2, r3) S390_RRF_1(c, 0xb9da, r1, r2, r3)
+#define s390_alhsik(c, r1, r3, v) S390_RIE_1(c, 0xecda, r1, r3, v)
+#define s390_alr(c, r1, r2) S390_RR(c, 0x1e, r1, r2)
+#define s390_alrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9fa, r1, r2, r3)
+#define s390_alsi(c, b, d, v) S390_SIY(c, 0xeb6e, b, d, v)
+#define s390_alsih(c, r, v) S390_RIL_1(c, 0xcca, r, v)
+#define s390_alsihn(c, r, v) S390_RIL_1(c, 0xccb, r, v)
+#define s390_aly(c, r, x, b, d) S390_RXY(c, 0xe35e, r, x, b, d)
+#define s390_ar(c, r1, r2) S390_RR(c, 0x1a, r1, r2)
+#define s390_ark(c, r1, r2, r3) S390_RRF_1(c, 0xb9f8, r1, r2, r3)
+#define s390_asi(c, b, d, v) S390_SIY(c, 0xeb6a, b, d, v)
+#define s390_ay(c, r, x, b, d) S390_RXY(c, 0xe35a, r, x, b, d)
+#define s390_basr(c, r1, r2) S390_RR(c, 0x0d, r1, r2)
+#define s390_bctr(c, r1, r2) S390_RR(c, 0x06, r1, r2)
+#define s390_bctrg(c, r1, r2) S390_RRE(c, 0xb946, r1, r2)
+#define s390_bnzr(c, r) S390_RR(c, 0x07, 0x07, r)
+#define s390_bras(c, r, o) S390_RI(c, 0xa75, r, o)
+#define s390_brasl(c, r, o) S390_RIL_1(c, 0xc05, r, o)
+#define s390_brc(c, m, d) S390_RI(c, 0xa74, m, d)
+#define s390_brcl(c, m, d) S390_RIL_2(c, 0xc04, m, d)
+#define s390_br(c, r) S390_RR(c, 0x07, 0xf, r)
+#define s390_break(c) S390_RR(c, 0, 0, 0)
+#define s390_bzr(c, r) S390_RR(c, 0x07, 0x08, r)
+#define s390_c(c, r, x, b, d) S390_RX(c, 0x59, r, x, b, d)
+#define s390_cdb(c, r, x, b, d) S390_RXE(c, 0xed19, r, x, b, d)
+#define s390_cdbr(c, r1, r2) S390_RRE(c, 0xb319, r1, r2)
+#define s390_cdfbr(c, r1, r2) S390_RRE(c, 0xb395, r1, r2)
+#define s390_cdgbr(c, r1, r2) S390_RRE(c, 0xb3a5, r1, r2)
+#define s390_cds(c, r1, r2, b, d) S390_RX(c, 0xbb, r1, r2, b, d)
+#define s390_cdsg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb3e, r1, r2, b, d)
+#define s390_cdsy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb31, r1, r2, b, d)
+#define s390_cebr(c, r1, r2) S390_RRE(c, 0xb309, r1, r2)
+#define s390_cegbr(c, r1, r2) S390_RRE(c, 0xb3a4, r1, r2)
+#define s390_cfdbr(c, r1, m, r2) S390_RRF_2(c, 0xb399, r1, m, r2)
+#define s390_cfi(c, r, v) S390_RIL_1(c, 0xc2d, r, v)
+#define s390_cgdbr(c, r1, m, r2) S390_RRF_2(c, 0xb3a9, r1, m, r2)
+#define s390_cg(c, r, x, b, d) S390_RXY(c, 0xe320, r, x, b, d)
+#define s390_cgfi(c, r, v) S390_RIL_1(c, 0xc2c, r, v)
+#define s390_cgfrl(c, r, v) S390_RIL_1(c, 0xc6c, r, v)
+#define s390_cghi(c, r, i) S390_RI(c, 0xa7f, r, i)
+#define s390_cgib(c, r, i, m, b, d) S390_RIS(c, 0xecfc, r, i, m, b, d)
+#define s390_cgij(c, r, i, m, d) S390_RIE_3(c, 0xec7c, r, i, m, d)
+#define s390_cgit(c, r, i, m) S390_RIE_4(c, 0xec70, r, i, m)
+#define s390_cgr(c, r1, r2) S390_RRE(c, 0xb920, r1, r2)
+#define s390_cgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece4, r1, r2, m3, b, d)
+#define s390_cgrj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec64, r1, r2, m3, v)
+#define s390_cgrl(c, r, v) S390_RIL_1(c, 0xc68, r, v)
+#define s390_chi(c, r, i) S390_RI(c, 0xa7e, r, i)
+#define s390_cib(c, r, i, m, b, d) S390_RIS(c, 0xecfe, r, i, m, b, d)
+#define s390_cij(c, r, i, m, d) S390_RIE_3(c, 0xec7e, r, i, m, d)
+#define s390_cit(c, r, i, m) S390_RIE_4(c, 0xec72, r, i, m)
+#define s390_cl(c, r, x, b, d) S390_RX(c, 0x55, r, x, b, d)
+#define s390_clg(c, r, x, b, d) S390_RXY(c, 0xe321, r, x, b, d)
+#define s390_clgib(c, r, i, m, b, d) S390_RIS(c, 0xecfd, r, i, m, b, d)
+#define s390_clgij(c, r, i, m, d) S390_RIE_3(c, 0xec7d, r, i, m, d)
+#define s390_clgr(c, r1, r2) S390_RRE(c, 0xb921, r1, r2)
+#define s390_clgrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec65, r1, r2, m, v)
+#define s390_clgrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xece5, r1, r2, m3, b, d)
+#define s390_clib(c, r, i, m, b, d) S390_RIS(c, 0xecff, r, i, m, b, d)
+#define s390_clij(c, r, i, m, d) S390_RIE_3(c, 0xec7f, r, i, m, d)
+#define s390_clr(c, r1, r2) S390_RR(c, 0x15, r1, r2)
+#define s390_clrb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf7, r1, r2, m3, b, d)
+#define s390_clrj(c, r1, r2, m, v) S390_RIE_2(c, 0xec77, r1, r2, m, v)
+#define s390_cr(c, r1, r2) S390_RR(c, 0x19, r1, r2)
+#define s390_crb(c, r1, r2, m3, b, d) S390_RRS(c, 0xecf6, r1, r2, m3, b, d)
+#define s390_crj(c, r1, r2, m3, v) S390_RIE_2(c, 0xec76, r1, r2, m3, v)
+#define s390_crl(c, r, v) S390_RIL_1(c, 0xc6d, r, v)
+#define s390_crt(c, r1, r2, m3) S390_RRF_2(c, 0xb972, r1, m3, r2)
+#define s390_cgrt(c, r1, r2, m3) S390_RRF_2(c, 0xb960, r1, m3, r2)
+#define s390_cs(c, r1, r2, b, d) S390_RX(c, 0xba, r1, r2, b, d)
+#define s390_csg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb30, r1, r2, b, d)
+#define s390_csst(c, d1, b1, d2, b2, r) S390_SSF(c, 0xc82, r, b1, d1, b2, d2)
+#define s390_csy(c, r1, r2, b, d) S390_RSY_1(c, 0xeb14, r1, r2, b, d)
+#define s390_ddbr(c, r1, r2) S390_RRE(c, 0xb31d, r1, r2)
+#define s390_debr(c, r1, r2) S390_RRE(c, 0xb30d, r1, r2)
+#define s390_didbr(c, r1, r2, m, r3) S390_RRF_3(c, 0xb35b, r1, r2, m, r3)
+#define s390_dlgr(c, r1, r2) S390_RRE(c, 0xb987, r1, r2)
+#define s390_dlr(c, r1, r2) S390_RRE(c, 0xb997, r1, r2)
+#define s390_dr(c, r1, r2) S390_RR(c, 0x1d, r1, r2)
+#define s390_dsgfr(c, r1, r2) S390_RRE(c, 0xb91d, r1, r2)
+#define s390_dsgr(c, r1, r2) S390_RRE(c, 0xb90d, r1, r2)
+#define s390_ear(c, r1, r2) S390_RRE(c, 0xb24f, r1, r2)
+#define s390_ic(c, r, x, b, d) S390_RX(c, 0x43, r, x, b, d)
+#define s390_icm(c, r, m, b, d) S390_RX(c, 0xbf, r, m, b, d)
+#define s390_icmy(c, r, x, b, d) S390_RXY(c, 0xeb81, r, x, b, d)
+#define s390_icy(c, r, x, b, d) S390_RXY(c, 0xe373, r, x, b, d)
+#define s390_iihf(c, r, v) S390_RIL_1(c, 0xc08, r, v)
+#define s390_iihh(c, r, v) S390_RI(c, 0xa50, r, v)
+#define s390_iihl(c, r, v) S390_RI(c, 0xa51, r, v)
+#define s390_iilf(c, r, v) S390_RIL_1(c, 0xc09, r, v)
+#define s390_iilh(c, r, v) S390_RI(c, 0xa52, r, v)
+#define s390_iill(c, r, v) S390_RI(c, 0xa53, r, v)
+#define s390_j(c,d) s390_brc(c, S390_CC_UN, d)
+#define s390_jc(c, m, d) s390_brc(c, m, d)
+#define s390_jcl(c, m, d) s390_brcl(c, m, d)
+#define s390_jcy(c, d) s390_brc(c, S390_CC_CY, d)
+#define s390_je(c, d) s390_brc(c, S390_CC_EQ, d)
+#define s390_jeo(c, d) s390_brc(c, S390_CC_ZR|S390_CC_OV, d)
+#define s390_jh(c, d) s390_brc(c, S390_CC_GT, d)
+#define s390_jho(c, d) s390_brc(c, S390_CC_GT|S390_CC_OV, d)
+#define s390_jl(c, d) s390_brc(c, S390_CC_LT, d)
+#define s390_jlo(c, d) s390_brc(c, S390_CC_LT|S390_CC_OV, d)
+#define s390_jm(c, d) s390_brc(c, S390_CC_LT, d)
+#define s390_jnc(c, d) s390_brc(c, S390_CC_NC, d)
+#define s390_jne(c, d) s390_brc(c, S390_CC_NZ, d)
+#define s390_jnh(c, d) s390_brc(c, S390_CC_LE, d)
+#define s390_jnl(c, d) s390_brc(c, S390_CC_GE, d)
+#define s390_jnz(c, d) s390_brc(c, S390_CC_NZ, d)
+#define s390_jo(c, d) s390_brc(c, S390_CC_OV, d)
+#define s390_jno(c, d) s390_brc(c, S390_CC_NO, d)
+#define s390_jp(c, d) s390_brc(c, S390_CC_GT, d)
+#define s390_jz(c, d) s390_brc(c, S390_CC_ZR, d)
+#define s390_jg(c,d) s390_brcl(c, S390_CC_UN, d)
+#define s390_jgcy(c, d) s390_brcl(c, S390_CC_CY, d)
+#define s390_jge(c, d) s390_brcl(c, S390_CC_EQ, d)
+#define s390_jgeo(c, d) s390_brcl(c, S390_CC_ZR|S390_CC_OV, d)
+#define s390_jgh(c, d) s390_brcl(c, S390_CC_GT, d)
+#define s390_jgho(c, d) s390_brcl(c, S390_CC_GT|S390_CC_OV, d)
+#define s390_jgl(c, d) s390_brcl(c, S390_CC_LT, d)
+#define s390_jglo(c, d) s390_brcl(c, S390_CC_LT|S390_CC_OV, d)
+#define s390_jgm(c, d) s390_brcl(c, S390_CC_LT, d)
+#define s390_jgnc(c, d) s390_brcl(c, S390_CC_NC, d)
+#define s390_jgne(c, d) s390_brcl(c, S390_CC_NZ, d)
+#define s390_jgnh(c, d) s390_brcl(c, S390_CC_LE, d)
+#define s390_jgnl(c, d) s390_brcl(c, S390_CC_GE, d)
+#define s390_jgnz(c, d) s390_brcl(c, S390_CC_NZ, d)
+#define s390_jgo(c, d) s390_brcl(c, S390_CC_OV, d)
+#define s390_jgno(c, d) s390_brcl(c, S390_CC_NO, d)
+#define s390_jgp(c, d) s390_brcl(c, S390_CC_GT, d)
+#define s390_jgz(c, d) s390_brcl(c, S390_CC_ZR, d)
+#define s390_l(c, r, x, b, d) S390_RX(c, 0x58, r, x, b, d)
+#define s390_ly(c, r, x, b, d) S390_RXY(c, 0xe358, r, x, b, d)
+#define s390_la(c, r, x, b, d) S390_RX(c, 0x41, r, x, b, d)
+#define s390_lay(c, r, x, b, d) S390_RXY(c, 0xe371, r, x, b, d)
+#define s390_lam(c, r1, r2, b, d) S390_RS_1(c, 0x9a, r1, r2, b, d)
+#define s390_larl(c, r, o) S390_RIL_1(c, 0xc00, r, o)
+#define s390_lb(c, r, x, b, d) S390_RXY(c, 0xe376, r, x, b, d)
+#define s390_lbr(c, r1, r2) S390_RRE(c, 0xb926, r1, r2)
+#define s390_lcdbr(c, r1, r2) S390_RRE(c, 0xb313, r1, r2)
+#define s390_lcgr(c, r1, r2) S390_RRE(c, 0xb903, r1, r2)
+#define s390_lcr(c, r1, r2) S390_RR(c, 0x13, r1, r2)
+#define s390_ld(c, f, x, b, d) S390_RX(c, 0x68, f, x, b, d)
+#define s390_ldy(c, r, x, b, d) S390_RXY(c, 0xed65, r, x, b, d)
+#define s390_ldeb(c, r, x, b, d) S390_RXE(c, 0xed04, r, x, b, d)
+#define s390_ldebr(c, r1, r2) S390_RRE(c, 0xb304, r1, r2)
+#define s390_ldgr(c, r1, r2) S390_RRE(c, 0xb3c1, r1, r2)
+#define s390_ldr(c, r1, r2) S390_RR(c, 0x28, r1, r2)
+#define s390_le(c, f, x, b, d) S390_RX(c, 0x78, f, x, b, d)
+#define s390_ledbr(c, r1, r2) S390_RRE(c, 0xb344, r1, r2)
+#define s390_ler(c, r1, r2) S390_RR(c, 0x38, r1, r2)
+#define s390_ley(c, r, x, b, d) S390_RXY(c, 0xed64, r, x, b, d)
+#define s390_lg(c, r, x, b, d) S390_RXY(c, 0xe304, r, x, b, d)
+#define s390_lgb(c, r, x, b, d) S390_RXY(c, 0xe377, r, x, b, d)
+#define s390_lgbr(c, r1, r2) S390_RRE(c, 0xb906, r1, r2)
+#define s390_lgdr(c, r1, r2) S390_RRE(c, 0xb3cd, r1, r2)
+#define s390_lgf(c, r, x, b, d) S390_RXY(c, 0xe314, r, x, b, d)
+#define s390_lgfi(c, r, v) S390_RIL_1(c, 0xc01, r, v)
+#define s390_lgfrl(c, r1, d) S390_RIL_1(c, 0xc4c, r1, d)
+#define s390_lgfr(c, r1, r2) S390_RRE(c, 0xb914, r1, r2)
+#define s390_lgh(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d)
+#define s390_lghi(c, r, v) S390_RI(c, 0xa79, r, v)
+#define s390_lghr(c, r1, r2) S390_RRE(c, 0xb907, r1, r2)
+#define s390_lgr(c, r1, r2) S390_RRE(c, 0xb904, r1, r2)
+#define s390_lgrl(c, r1, d) S390_RIL_1(c, 0xc48, r1, d)
+#define s390_lh(c, r, x, b, d) S390_RX(c, 0x48, r, x, b, d)
+#define s390_lhr(c, r1, r2) S390_RRE(c, 0xb927, r1, r2)
+#define s390_lhg(c, r, x, b, d) S390_RXY(c, 0xe315, r, x, b, d)
+#define s390_lhi(c, r, v) S390_RI(c, 0xa78, r, v)
+#define s390_lhy(c, r, x, b, d) S390_RXY(c, 0xe378, r, x, b, d)
+#define s390_llcr(c, r1, r2) S390_RRE(c, 0xb994, r1, r2)
+#define s390_llgc(c, r, x, b, d) S390_RXY(c, 0xe390, r, x, b, d)
+#define s390_llgcr(c, r1, r2) S390_RRE(c, 0xb984, r1, r2)
+#define s390_llgf(c, r, x, b, d) S390_RXY(c, 0xe316, r, x, b, d)
+#define s390_llgfr(c, r1, r2) S390_RRE(c, 0xb916, r1, r2)
+#define s390_llgh(c, r, x, b, d) S390_RXY(c, 0xe391, r, x, b, d)
+#define s390_llghr(c, r1, r2) S390_RRE(c, 0xb985, r1, r2)
+#define s390_llhr(c, r1, r2) S390_RRE(c, 0xb995, r1, r2)
+#define s390_llihf(c, r, v) S390_RIL_1(c, 0xc0e, r, v)
+#define s390_llihh(c, r, v) S390_RI(c, 0xa5c, r, v)
+#define s390_llihl(c, r, v) S390_RI(c, 0xa5d, r, v)
+#define s390_llilf(c, r, v) S390_RIL_1(c, 0xc0f, r, v)
+#define s390_llilh(c, r, v) S390_RI(c, 0xa5e, r, v)
+#define s390_llill(c, r, v) S390_RI(c, 0xa5f, r, v)
+#define s390_lm(c, r1, r2, b, d) S390_RS_1(c, 0x98, r1, r2, b, d)
+#define s390_lmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb04, r1, r2, b, d)
+#define s390_lndbr(c, r1, r2) S390_RRE(c, 0xb311, r1, r2)
+#define s390_lngr(c, r1, r2) S390_RRE(c, 0xb901, r1, r2)
+#define s390_lnr(c, r1, r2) S390_RR(c, 0x11, r1, r2)
+#define s390_lpdbr(c, r1, r2) S390_RRE(c, 0xb310, r1, r2)
+#define s390_lpgr(c, r1, r2) S390_RRE(c, 0xb900, r1, r2)
+#define s390_lpr(c, r1, r2) S390_RR(c, 0x10, r1, r2)
+#define s390_lr(c, r1, r2) S390_RR(c, 0x18, r1, r2)
+#define s390_lrl(c, r1, d) S390_RIL_1(c, 0xc4d, r1, d)
+#define s390_ltgfr(c, r1, r2) S390_RRE(c, 0xb912, r1, r2)
+#define s390_ltgr(c, r1, r2) S390_RRE(c, 0xb902, r1, r2)
+#define s390_ltr(c, r1, r2) S390_RR(c, 0x12, r1, r2)
+#define s390_lzdr(c, r) S390_RRE(c, 0xb375, r, 0)
+#define s390_lzer(c, r) S390_RRE(c, 0xb374, r, 0)
+#define s390_m(c, r, x, b, d) S390_RX(c, 0x5c, r, x, b, d)
+#define s390_mdbr(c, r1, r2) S390_RRE(c, 0xb31c, r1, r2)
+#define s390_meebr(c, r1, r2) S390_RRE(c, 0xb317, r1, r2)
+#define s390_mfy(c, r, x, b, d) S390_RXY(c, 0xe35c, r, x, b, d)
+#define s390_mlgr(c, r1, r2) S390_RRE(c, 0xb986, r1, r2)
+#define s390_mlr(c, r1, r2) S390_RRE(c, 0xb996, r1, r2)
+#define s390_mr(c, r1, r2) S390_RR(c, 0x1c, r1, r2)
+#define s390_ms(c, r, x, b, d) S390_RX(c, 0x71, r, x, b, d)
+#define s390_msi(c, r, v) S390_RIL_1(c, 0xc21, r, v)
+#define s390_msgfr(c, r1, r2) S390_RRE(c, 0xb91c, r1, r2)
+#define s390_msgi(c, r, v) S390_RIL_1(c, 0xc20, r, v)
+#define s390_msgr(c, r1, r2) S390_RRE(c, 0xb90c, r1, r2)
+#define s390_msr(c, r1, r2) S390_RRE(c, 0xb252, r1, r2)
+#define s390_mvc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd2, l, b1, d1, b2, d2)
+#define s390_mvcl(c, r1, r2) S390_RR(c, 0x0e, r1, r2)
+#define s390_mvcle(c, r1, r3, d2, b2) S390_RS_1(c, 0xa8, r1, r3, b2, d2)
+#define s390_n(c, r, x, b, d) S390_RX(c, 0x54, r, x, b, d)
+#define s390_nc(c, l, b1, d1, b2, d2) S390_SS_1(c, 0xd4, l, b1, d1, b2, d2)
+#define s390_ng(c, r, x, b, d) S390_RXY(c, 0xe380, r, x, b, d)
+#define s390_ngr(c, r1, r2) S390_RRE(c, 0xb980, r1, r2)
+#define s390_ngrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9e4, r1, r2, r3)
+#define s390_ni(c, b, d, v) S390_SI(c, 0x94, b, d, v)
+#define s390_nihf(c, r, v) S390_RIL_1(c, 0xc0a, r, v)
+#define s390_nihh(c, r, v) S390_RI(c, 0xa54, r, v)
+#define s390_nihl(c, r, v) S390_RI(c, 0xa55, r, v)
+#define s390_nilf(c, r, v) S390_RIL_1(c, 0xc0b, r, v)
+#define s390_nilh(c, r, v) S390_RI(c, 0xa56, r, v)
+#define s390_nill(c, r, v) S390_RI(c, 0xa57, r, v)
+#define s390_niy(c, b, d, v) S390_SIY(c, 0xeb54, b, d, v)
+#define s390_nop(c) S390_RR(c, 0x07, 0x0, 0)
+#define s390_nr(c, r1, r2) S390_RR(c, 0x14, r1, r2)
+#define s390_nrk(c, r1, r2, r3) S390_RRF_1(c, 0xb9f4, r1, r2, r3)
+#define s390_ny(c, r, x, b, d) S390_RXY(c, 0xe354, r, x, b, d)
+#define s390_o(c, r, x, b, d) S390_RX(c, 0x56, r, x, b, d)
+#define s390_oihf(c, r, v) S390_RIL_1(c, 0xc0c, r, v)
+#define s390_oihh(c, r, v) S390_RI(c, 0xa58, r, v)
+#define s390_oihl(c, r, v) S390_RI(c, 0xa59, r, v)
+#define s390_oilf(c, r, v) S390_RIL_1(c, 0xc0d, r, v)
+#define s390_oilh(c, r, v) S390_RI(c, 0xa5a, r, v)
+#define s390_oill(c, r, v) S390_RI(c, 0xa5b, r, v)
+#define s390_oiy(c, b, d, v) S390_SIY(c, 0xeb56, b, d, v)
+#define s390_og(c, r, x, b, d) S390_RXY(c, 0xe381, r, x, b, d)
+#define s390_ogr(c, r1, r2) S390_RRE(c, 0xb981, r1, r2)
+#define s390_or(c, r1, r2) S390_RR(c, 0x16, r1, r2)
+#define s390_s(c, r, x, b, d) S390_RX(c, 0x5b, r, x, b, d)
+#define s390_sdb(c, r, x, b, d) S390_RXE(c, 0xed1b, r, x, b, d)
+#define s390_sdbr(c, r1, r2) S390_RRE(c, 0xb31b, r1, r2)
+#define s390_sebr(c, r1, r2) S390_RRE(c, 0xb30b, r1, r2)
+#define s390_sg(c, r, x, b, d) S390_RXY(c, 0xe309, r, x, b, d)
+#define s390_sgf(c, r, x, b, d) S390_RXY(c, 0xe319, r, x, b, d)
+#define s390_sgr(c, r1, r2) S390_RRE(c, 0xb909, r1, r2)
+#define s390_sl(c, r, x, b, d) S390_RX(c, 0x5f, r, x, b, d)
+#define s390_sla(c, r, b, d) S390_RS_3(c, 0x8b, r, b, d)
+#define s390_slag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0b, r1, r2, b, d)
+#define s390_slbg(c, r, x, b, d) S390_RXY(c, 0xe389, r, x, b, d)
+#define s390_slbgr(c, r1, r2) S390_RRE(c, 0xb989, r1, r2)
+#define s390_slbr(c, r1, r2) S390_RRE(c, 0xb999, r1, r2)
+#define s390_slda(c, r, b, d) S390_RS_3(c, 0x8f, r, b, d)
+#define s390_sldl(c, r, b, d) S390_RS_3(c, 0x8d, r, b, d)
+#define s390_slfi(c, r, v) S390_RIL_1(c, 0xc25, r, v)
+#define s390_slg(c, r, x, b, d) S390_RXY(c, 0xe30b, r, x, b, d)
+#define s390_slgf(c, r, x, b, d) S390_RXY(c, 0xe31b, r, x, b, d)
+#define s390_slgfr(c, r1, r2) S390_RRE(c, 0xb91b, r1, r2)
+#define s390_slgfi(c, r, v) S390_RIL_1(c, 0xc24, r, v)
+#define s390_slgr(c, r1, r2) S390_RRE(c, 0xb90b, r1, r2)
+#define s390_sll(c, r, b, d) S390_RS_3(c, 0x89, r, b, d)
+#define s390_sllg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0d, r1, r2, b, d)
+#define s390_slr(c, r1, r2) S390_RR(c, 0x1f, r1, r2)
+#define s390_sqdbr(c, r1, r2) S390_RRE(c, 0xb315, r1, r2)
+#define s390_sqebr(c, r1, r2) S390_RRE(c, 0xb314, r1, r2)
+#define s390_sra(c, r, b, d) S390_RS_3(c, 0x8a, r, b, d)
+#define s390_srag(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0a, r1, r2, b, d)
+#define s390_sr(c, r1, r2) S390_RR(c, 0x1b, r1, r2)
+#define s390_srda(c, r, b, d) S390_RS_3(c, 0x8e, r, b, d)
+#define s390_srdl(c, r, b, d) S390_RS_3(c, 0x8c, r, b, d)
+#define s390_srl(c, r, b, d) S390_RS_3(c, 0x88, r, b, d)
+#define s390_srlg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb0c, r1, r2, b, d)
+#define s390_st(c, r, x, b, d) S390_RX(c, 0x50, r, x, b, d)
+#define s390_stam(c, r1, r2, b, d) S390_RS_1(c, 0x9b, r1, r2, b, d)
+#define s390_stc(c, r, x, b, d) S390_RX(c, 0x42, r, x, b, d)
+#define s390_stcm(c, r, m, b, d) S390_RX(c, 0xbe, r, m, b, d)
+#define s390_stcmy(c, r, x, b, d) S390_RXY(c, 0xeb2d, r, x, b, d)
+#define s390_stcy(c, r, x, b, d) S390_RXY(c, 0xe372, r, x, b, d)
+#define s390_std(c, f, x, b, d) S390_RX(c, 0x60, f, x, b, d)
+#define s390_stdy(c, r, x, b, d) S390_RXY(c, 0xed67, r, x, b, d)
+#define s390_ste(c, f, x, b, d) S390_RX(c, 0x70, f, x, b, d)
+#define s390_stey(c, r, x, b, d) S390_RXY(c, 0xed66, r, x, b, d)
+#define s390_stfpc(c, b, d) S390_S(c, 0xb29c, b, d)
+#define s390_stg(c, r, x, b, d) S390_RXY(c, 0xe324, r, x, b, d)
+#define s390_sth(c, r, x, b, d) S390_RX(c, 0x40, r, x, b, d)
+#define s390_sthy(c, r, x, b, d) S390_RXY(c, 0xe370, r, x, b, d)
+#define s390_stm(c, r1, r2, b, d) S390_RS_1(c, 0x90, r1, r2, b, d)
+#define s390_stmg(c, r1, r2, b, d) S390_RSY_1(c, 0xeb24, r1, r2, b, d)
+#define s390_sty(c, r, x, b, d) S390_RXY(c, 0xe350, r, x, b, d)
+#define s390_tcdb(c, r, x, b, d) S390_RXE(c, 0xed11, r, x, b, d)
+#define s390_tceb(c, r, x, b, d) S390_RXE(c, 0xed10, r, x, b, d)
+#define s390_x(c, r, x, b, d) S390_RX(c, 0x57, r, x, b, d)
+#define s390_xihf(c, r, v) S390_RIL_1(c, 0xc06, r, v)
+#define s390_xilf(c, r, v) S390_RIL_1(c, 0xc07, r, v)
+#define s390_xg(c, r, x, b, d) S390_RXY(c, 0xe382, r, x, b, d)
+#define s390_xgr(c, r1, r2) S390_RRE(c, 0xb982, r1, r2)
+#define s390_xr(c, r1, r2) S390_RR(c, 0x17, r1, r2)
+#define s390_xy(c, r, x, b, d) S390_RXY(c, 0xe357, r, x, b, d)
+#endif
diff --git a/src/arch/s390x/tramp.c b/src/arch/s390x/tramp.c
new file mode 100644
index 0000000..fe9f310
--- /dev/null
+++ b/src/arch/s390x/tramp.c
@@ -0,0 +1,1149 @@
+/*------------------------------------------------------------------*/
+/* */
+/* Name - tramp.c */
+/* */
+/* Function - Create trampolines to invoke arbitrary functions. */
+/* */
+/* Author     - Neale Ferguson.                                     */
+/* */
+/* Date - October, 2002 */
+/* */
+/* */
+/*------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------*/
+/* D e f i n e s */
+/*------------------------------------------------------------------*/
+
+#define PROLOG_INS 24 /* Size of emitted prolog */
+#define CALL_INS 4 /* Size of emitted call */
+#define EPILOG_INS 18 /* Size of emitted epilog */
+
+#define DEBUG(x)
+
+/*========================= End of Defines =========================*/
+
+/*------------------------------------------------------------------*/
+/* I n c l u d e s */
+/*------------------------------------------------------------------*/
+
+#ifdef NEED_MPROTECT
+# include <sys/mman.h>
+# include <limits.h> /* for PAGESIZE */
+# ifndef PAGESIZE
+# define PAGESIZE 4096
+# endif
+#endif
+
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+#include "s390x-codegen.h"
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+#include "mono/metadata/marshal.h"
+
+/*========================= End of Includes ========================*/
+
+/*------------------------------------------------------------------*/
+/* T y p e d e f s */
+/*------------------------------------------------------------------*/
+
+/*------------------------------------------------------------------*/
+/* Structure used to accumulate size of stack, code, and locals    */
+/*------------------------------------------------------------------*/
+typedef struct {
+ guint stack_size,
+ local_size,
+ code_size,
+ retStruct;
+} size_data;
+
+/*========================= End of Typedefs ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - add_general */
+/* */
+/* Name		- add_general                                        */
+/*                                                                  */
+/* Function	- Determine code and stack size increments for a     */
+/* parameter. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline void
+add_general (guint *gr, size_data *sz, gboolean simple)
+{
+ if (simple) {
+ if (*gr >= GENERAL_REGS) {
+ sz->stack_size += sizeof(long);
+ sz->code_size += 12;
+ } else {
+ sz->code_size += 8;
+ }
+ } else {
+ if (*gr >= GENERAL_REGS - 1) {
+ sz->stack_size += 8 + (sz->stack_size % 8);
+ sz->code_size += 10;
+ } else {
+ sz->code_size += 8;
+ }
+ (*gr) ++;
+ }
+ (*gr) ++;
+}
+
+/*========================= End of Function ========================*/
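+
+/*
+ * Editorial sketch - how add_general advances the register counter.
+ * A register-sized argument consumes one general register (or one
+ * stack slot once the registers are exhausted); an 8-byte argument
+ * consumes two, hence the extra (*gr)++ in the non-simple path:
+ *
+ *	guint gr = 2;			(starting register, as in
+ *					 calculate_sizes below)
+ *	add_general (&gr, &sz, TRUE);	int-sized arg:	gr 2 -> 3
+ *	add_general (&gr, &sz, FALSE);	8-byte arg:	gr 3 -> 5
+ */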
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - calculate_sizes */
+/* */
+/* Function - Determine the amount of space required for code */
+/* and stack. In addition determine starting points */
+/* for stack-based parameters, and area for struct- */
+/* ures being returned on the stack. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline void
+calculate_sizes (MonoMethodSignature *sig, size_data *sz,
+ gboolean string_ctor)
+{
+ guint i, fr, gr, size;
+ guint32 simpletype, align;
+
+ fr = 0;
+ gr = 2;
+ sz->retStruct = 0;
+ sz->stack_size = S390_MINIMAL_STACK_SIZE;
+ sz->code_size = (PROLOG_INS + CALL_INS + EPILOG_INS);
+ sz->local_size = 0;
+
+ if (sig->hasthis) {
+ add_general (&gr, sz, TRUE);
+ }
+
+ /*----------------------------------------------------------*/
+ /* We determine the size of the return code/stack in case we*/
+ /* need to reserve a register to be used to address a stack */
+ /* area that the callee will use. */
+ /*----------------------------------------------------------*/
+
+ if (sig->ret->byref || string_ctor) {
+ sz->code_size += 8;
+ } else {
+ simpletype = sig->ret->type;
+enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ sz->code_size += 4;
+ break;
+ case MONO_TYPE_I8:
+ sz->code_size += 4;
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ gr++;
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->ret->data.klass, &align);
+ if (align > 1)
+ sz->code_size += 10;
+ switch (size) {
+ /*----------------------------------*/
+ /* On S/390, structures of size 1, */
+ /* 2, 4, and 8 bytes are returned */
+ /* in (a) register(s). */
+ /*----------------------------------*/
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ sz->code_size += 16;
+ sz->stack_size += 4;
+ break;
+ default:
+ sz->retStruct = 1;
+ sz->code_size += 32;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* We determine the size of the parameter code and stack */
+ /* requirements by checking the types and sizes of the */
+ /* parameters. */
+ /*----------------------------------------------------------*/
+
+ for (i = 0; i < sig->param_count; ++i) {
+ if (sig->params [i]->byref) {
+ add_general (&gr, sz, TRUE);
+ continue;
+ }
+ simpletype = sig->params [i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ add_general (&gr, sz, TRUE);
+ break;
+ case MONO_TYPE_SZARRAY:
+ add_general (&gr, sz, TRUE);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simpletype = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->params [i]->data.klass, &align);
+ DEBUG(printf("%d typesize: %d (%d)\n",i,size,align));
+ switch (size) {
+ /*----------------------------------*/
+ /* On S/390, structures of size 1, */
+ /* 2, 4, and 8 bytes are passed in */
+ /* (a) register(s). */
+ /*----------------------------------*/
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ add_general(&gr, sz, TRUE);
+ break;
+ case 8:
+ add_general(&gr, sz, FALSE);
+ break;
+ default:
+ sz->local_size += (size + (size % align));
+ sz->code_size += 40;
+ }
+ break;
+ case MONO_TYPE_I8:
+ add_general (&gr, sz, FALSE);
+ break;
+ case MONO_TYPE_R4:
+ if (fr < FLOAT_REGS) {
+ sz->code_size += 4;
+ fr++;
+ }
+ else {
+ sz->code_size += 4;
+ sz->stack_size += 8;
+ }
+ break;
+ case MONO_TYPE_R8:
+ if (fr < FLOAT_REGS) {
+ sz->code_size += 4;
+ fr++;
+ } else {
+ sz->code_size += 4;
+ sz->stack_size += 8 + (sz->stack_size % 8);
+ }
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+ }
+ }
+
+
+ /* align stack size to 8 */
+ DEBUG (printf (" stack size: %d (%d)\n"
+ " code size: %d\n"
+ " local size: %d\n",
+		       (sz->stack_size + 7) & ~7, sz->stack_size,
+		       (sz->code_size),(sz->local_size + 7) & ~7));
+	sz->stack_size = (sz->stack_size + 7) & ~7;
+	sz->local_size = (sz->local_size + 7) & ~7;
+}
+
+/*========================= End of Function ========================*/
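+
+/*
+ * Editorial note - the rounding idiom used above: to round a size up
+ * to the next multiple of 8, add 7 and mask with ~7:
+ *
+ *	aligned = (size + 7) & ~7;	0 -> 0, 1 -> 8, 9 -> 16
+ *
+ * Masking with ~8 would merely clear bit 3 ((9 + 8) & ~8 == 17) and
+ * does not align, which is why the expressions above use ~7.
+ */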
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_prolog */
+/* */
+/* Function - Create the instructions that implement the stand- */
+/* ard function prolog according to the S/390 ABI. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_prolog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ guint stack_size;
+
+ stack_size = sz->stack_size + sz->local_size;
+
+ /* function prolog */
+ s390_stmg(p, s390_r6, s390_r14, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_lg (p, s390_r7, 0, STK_BASE, MINV_POS);
+ s390_lgr (p, s390_r11, STK_BASE);
+ s390_aghi(p, STK_BASE, -stack_size);
+ s390_stg (p, s390_r11, 0, STK_BASE, 0);
+
+ /*-----------------------------------------*/
+ /* Save: */
+ /* - address of "callme" */
+ /* - address of "retval" */
+ /* - address of "arguments" */
+ /*-----------------------------------------*/
+ s390_lgr (p, s390_r9, s390_r2);
+ s390_lgr (p, s390_r8, s390_r3);
+ s390_lgr (p, s390_r10, s390_r5);
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
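+
+/*
+ * Editorial sketch - the prolog emitted above corresponds roughly to
+ * the following s390x assembler (offsets are symbolic; the actual
+ * values come from s390x-codegen.h):
+ *
+ *	stmg	%r6,%r14,SAVE(%r15)	save callee-saved registers
+ *	lg	%r7,MINV(%r15)
+ *	lgr	%r11,%r15
+ *	aghi	%r15,-stack_size	allocate the frame
+ *	stg	%r11,0(%r15)		store the back chain
+ */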
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_save_parameters */
+/* */
+/* Function - Create the instructions that load registers with */
+/* parameters, place others on the stack according */
+/* to the S/390 ABI. */
+/* */
+/* The resulting function takes the form: */
+/* void func (void (*callme)(), void *retval, */
+/* void *this_obj, stackval *arguments); */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_save_parameters (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ guint i, fr, gr, act_strs, align,
+ stack_par_pos, size, local_pos;
+ guint32 simpletype;
+
+ /*----------------------------------------------------------*/
+ /* If a structure on stack is being returned, reserve r2 */
+ /* to point to an area where it can be passed. */
+ /*----------------------------------------------------------*/
+ if (sz->retStruct)
+ gr = 1;
+ else
+ gr = 0;
+ fr = 0;
+ act_strs = 0;
+ stack_par_pos = S390_MINIMAL_STACK_SIZE;
+ local_pos = sz->stack_size;
+
+ if (sig->hasthis) {
+ s390_lr (p, s390_r2 + gr, s390_r4);
+ gr++;
+ }
+
+ act_strs = 0;
+ for (i = 0; i < sig->param_count; ++i) {
+ DEBUG(printf("par: %d type: %d ref: %d\n",i,sig->params[i]->type,sig->params[i]->byref));
+ if (sig->params [i]->byref) {
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr ++;
+ } else {
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ continue;
+ }
+ simpletype = sig->params [i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr ++;
+ } else {
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_stg(p, s390_r0, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simpletype = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ if (sig->pinvoke)
+ size = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ size = mono_class_value_size (sig->params [i]->data.klass, &align);
+ DEBUG(printf("parStruct - size %d pinvoke: %d\n",size,sig->pinvoke));
+ switch (size) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0,ARG_BASE, STKARG);
+ s390_lgf(p, s390_r2 + gr, 0, s390_r2 + gr, 0);
+ gr++;
+ } else {
+ stack_par_pos += (stack_par_pos % align);
+ s390_lg (p, s390_r10, 0,ARG_BASE, STKARG);
+ s390_lgf(p, s390_r10, 0, s390_r10, 0);
+ s390_st (p, s390_r10, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ break;
+ case 8:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ s390_lg (p, s390_r2 + gr, 0, s390_r2 + gr, 0);
+ } else {
+ stack_par_pos += (stack_par_pos % align);
+ s390_lg (p, s390_r10, 0, ARG_BASE, STKARG);
+ s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, s390_r10, 0);
+ stack_par_pos += sizeof(long long);
+ }
+ break;
+ default:
+ if (size <= 256) {
+ local_pos += (local_pos % align);
+ s390_lg (p, s390_r13, 0, ARG_BASE, STKARG);
+ s390_mvc (p, size, STK_BASE, local_pos, s390_r13, 0);
+ s390_la (p, s390_r13, 0, STK_BASE, local_pos);
+ local_pos += size;
+ } else {
+ local_pos += (local_pos % align);
+ s390_bras (p, s390_r13, 4);
+ s390_llong(p, size);
+ s390_lg (p, s390_r1, 0, s390_r13, 0);
+ s390_lg (p, s390_r0, 0, ARG_BASE, STKARG);
+ s390_lgr (p, s390_r14, s390_r12);
+ s390_la (p, s390_r12, 0, STK_BASE, local_pos);
+ s390_lgr (p, s390_r13, s390_r1);
+ s390_mvcl (p, s390_r12, s390_r0);
+ s390_lgr (p, s390_r12, s390_r14);
+ s390_la (p, s390_r13, 0, STK_BASE, local_pos);
+ local_pos += size;
+ }
+ if (gr < GENERAL_REGS) {
+ s390_lgr(p, s390_r2 + gr, s390_r13);
+ gr++;
+ } else {
+ s390_stg(p, s390_r13, 0, STK_BASE, stack_par_pos);
+ stack_par_pos += sizeof(long);
+ }
+ }
+ break;
+ case MONO_TYPE_I8:
+ if (gr < GENERAL_REGS) {
+ s390_lg (p, s390_r2 + gr, 0, ARG_BASE, STKARG);
+ gr += 2;
+ } else {
+				/* keep the 8-byte slot naturally aligned */
+				stack_par_pos = (stack_par_pos + 7) & ~7;
+				s390_mvc (p, sizeof(long long), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+				stack_par_pos += sizeof(long long);
+ }
+ break;
+ case MONO_TYPE_R4:
+ if (fr < FLOAT_REGS) {
+ s390_le (p, s390_r0 + fr, 0, ARG_BASE, STKARG);
+ fr++;
+ } else {
+ s390_mvc (p, sizeof(float), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+ stack_par_pos += sizeof(float);
+ }
+ break;
+ case MONO_TYPE_R8:
+ if (fr < FLOAT_REGS) {
+ s390_ld (p, s390_r0 + fr, 0, ARG_BASE, STKARG);
+ fr++;
+ } else {
+				/* keep the 8-byte slot naturally aligned */
+				stack_par_pos = (stack_par_pos + 7) & ~7;
+				s390_mvc (p, sizeof(double), STK_BASE, stack_par_pos, ARG_BASE, STKARG);
+				stack_par_pos += sizeof(double);
+ }
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params [i]->type);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* If we're returning a structure but not in a register */
+ /* then point the result area for the called routine */
+ /*----------------------------------------------------------*/
+ if (sz->retStruct) {
+ s390_lg (p, s390_r2, 0, s390_r8, 0);
+ }
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - alloc_code_memory */
+/* */
+/* Function - Allocate space to place the emitted code. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+alloc_code_memory (guint code_size)
+{
+ guint8 *p;
+
+#ifdef NEED_MPROTECT
+ p = g_malloc (code_size + PAGESIZE - 1);
+
+ /* Align to a multiple of PAGESIZE, assumed to be a power of two */
+	p = (guint8 *)(((gsize) p + PAGESIZE-1) & ~(PAGESIZE-1));
+#else
+ p = g_malloc (code_size);
+#endif
+ DEBUG (printf (" align: %p (%d)\n", p, (guint)p % 4));
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
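+
+/*
+ * Editorial note - the over-allocate-and-round idiom above, shown in
+ * isolation (assumes PAGESIZE is a power of two):
+ *
+ *	guint8 *raw = g_malloc (code_size + PAGESIZE - 1);
+ *	guint8 *p   = (guint8 *) (((gsize) raw + PAGESIZE - 1)
+ *				  & ~(gsize) (PAGESIZE - 1));
+ *
+ * p is then page-aligned, as mprotect() requires. Note that the raw
+ * pointer is discarded, so this memory is never freed.
+ */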
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_call_and_store_retval */
+/* */
+/* Function - Emit code that will implement the call to the */
+/* desired function, and unload the result according */
+/* to the S390 ABI for the type of value returned */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_call_and_store_retval (guint8 *p, MonoMethodSignature *sig,
+ size_data *sz, gboolean string_ctor)
+{
+ guint32 simpletype;
+ guint retSize, align;
+
+ /* call "callme" */
+ s390_basr (p, s390_r14, s390_r9);
+
+ /* get return value */
+ if (sig->ret->byref || string_ctor) {
+ s390_stg(p, s390_r2, 0, s390_r8, 0);
+ } else {
+ simpletype = sig->ret->type;
+enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ s390_stc (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ s390_sth (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ s390_st (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_R4:
+ s390_ste (p, s390_f0, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_R8:
+ s390_std (p, s390_f0, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_I8:
+ s390_stg (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ if (sig->pinvoke)
+ retSize = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ retSize = mono_class_value_size (sig->ret->data.klass, &align);
+printf("Returning %d bytes for type %d (%d)\n",retSize,simpletype,sig->pinvoke);
+ switch(retSize) {
+ case 0:
+ break;
+ case 1:
+ s390_stc (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 2:
+ s390_sth (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 4:
+ s390_st (p, s390_r2, 0, s390_r8, 0);
+ break;
+ case 8:
+ s390_stg (p, s390_r2, 0, s390_r8, 0);
+ break;
+ default: ;
+ /*------------------------------------------*/
+ /* The callee has already placed the result */
+ /* in the required area */
+ /*------------------------------------------*/
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x",
+ sig->ret->type);
+ }
+ }
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - emit_epilog */
+/* */
+/* Function - Create the instructions that implement the stand- */
+/* ard function epilog according to the S/390 ABI. */
+/* */
+/*------------------------------------------------------------------*/
+
+static inline guint8 *
+emit_epilog (guint8 *p, MonoMethodSignature *sig, size_data *sz)
+{
+ /* function epilog */
+ s390_lg (p, STK_BASE, 0, STK_BASE, 0);
+ s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET);
+ s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_br (p, s390_r4);
+
+ return p;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_create_trampoline. */
+/* */
+/* Function - Create the code that will allow a mono method to */
+/* invoke a system subroutine. */
+/* */
+/*------------------------------------------------------------------*/
+
+MonoPIFunc
+mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ guint8 *p, *code_buffer;
+ size_data sz;
+
+	DEBUG (printf ("\nPInvoke [start emitting]\n"));
+ calculate_sizes (sig, &sz, string_ctor);
+
+ p = code_buffer = alloc_code_memory (sz.code_size);
+ p = emit_prolog (p, sig, &sz);
+ p = emit_save_parameters (p, sig, &sz);
+ p = emit_call_and_store_retval (p, sig, &sz, string_ctor);
+ p = emit_epilog (p, sig, &sz);
+
+#ifdef NEED_MPROTECT
+	if (mprotect (code_buffer, sz.code_size, PROT_READ | PROT_WRITE | PROT_EXEC)) {
+ g_error ("Cannot mprotect trampoline\n");
+ }
+#endif
+
+	DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
+
+	DEBUG (printf ("PInvoke [end emitting]\n"));
+
+ return (MonoPIFunc) code_buffer;
+}
+
+/*========================= End of Function ========================*/
+
+/*------------------------------------------------------------------*/
+/* */
+/* Name - mono_arch_create_method_pointer */
+/* */
+/* Function - Returns a pointer to a native function that can */
+/* be used to call the specified method. */
+/* */
+/* The function created will receive the arguments */
+/*                 according to the calling convention specified   */
+/*                 in the method.                                   */
+/* */
+/* This function works by creating a MonoInvocation */
+/* structure, filling the fields in and calling */
+/* ves_exec_method() on it. */
+/* */
+/* Logic: */
+/* ------ */
+/* mono_arch_create_method_pointer (MonoMethod *method) */
+/* create the unmanaged->managed wrapper */
+/* register it with mono_jit_info_table_add() */
+/* */
+/* What does the unmanaged->managed wrapper do? */
+/* allocate a MonoInvocation structure (inv) on the stack */
+/* allocate an array of stackval on the stack with length = */
+/* method->signature->param_count + 1 [call it stack_args] */
+/* set inv->ex, inv->ex_handler, inv->parent to NULL */
+/* set inv->method to method */
+/* if method is an instance method, set inv->obj to the */
+/* 'this' argument (the first argument) else set to NULL */
+/* for each argument to the method call: */
+/* stackval_from_data (sig->params[i], &stack_args[i], */
+/* arg, sig->pinvoke); */
+/* Where: */
+/* ------ */
+/* sig - is method->signature */
+/* &stack_args[i] - is the pointer to the ith element */
+/* in the stackval array */
+/* arg - is a pointer to the argument re- */
+/* ceived by the function according */
+/* to the call convention. If it */
+/* gets passed in a register, save */
+/* on the stack first. */
+/* */
+/* set inv->retval to the address of the last element of */
+/* stack_args [recall we allocated param_count+1 of them] */
+/* call ves_exec_method(inv) */
+/* copy the returned value from inv->retval where the calling */
+/* convention expects to find it on return from the wrap- */
+/* per [if it's a structure, use stackval_to_data] */
+/* */
+/*------------------------------------------------------------------*/
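+
+/*
+ * Editorial sketch - the wrapper described above, written as plain C.
+ * this_arg and arg_ptr are stand-ins for the incoming ABI registers,
+ * not real identifiers in this file:
+ *
+ *	MonoInvocation inv = { 0 };
+ *	stackval stack_args [sig->param_count + 1];
+ *	inv.method = method;
+ *	inv.obj = sig->hasthis ? this_arg : NULL;
+ *	inv.stack_args = stack_args;
+ *	for (i = 0; i < sig->param_count; i++)
+ *		stackval_from_data (sig->params [i], &stack_args [i],
+ *				    arg_ptr [i], sig->pinvoke);
+ *	inv.retval = &stack_args [sig->param_count];
+ *	ves_exec_method (&inv);
+ *	... copy *inv.retval to the ABI return location ...
+ */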
+
+void *
+mono_arch_create_method_pointer (MonoMethod *method)
+{
+ MonoMethodSignature *sig;
+ MonoJitInfo *ji;
+ guint8 *p, *code_buffer;
+ guint i, align = 0, simple_type, retSize, reg_save = 0,
+ stackval_arg_pos, local_pos, float_pos,
+ local_start, reg_param = 0, stack_param,
+ this_flag, arg_pos, fpr_param, parSize;
+ guint32 simpletype;
+ size_data sz;
+ int *vtbuf, cpos, vt_cur;
+
+ sz.code_size = 1024;
+ sz.stack_size = 1024;
+ stack_param = 0;
+ fpr_param = 0;
+ arg_pos = 0;
+
+ sig = method->signature;
+
+ p = code_buffer = g_malloc (sz.code_size);
+
+	DEBUG (printf ("\nDelegate [start emitting] %s at %p\n",
+		       method->name, p));
+
+ /*----------------------------------------------------------*/
+ /* prolog */
+ /*----------------------------------------------------------*/
+ s390_stmg(p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_lg (p, s390_r7, 0, STK_BASE, MINV_POS);
+ s390_lgr (p, s390_r0, STK_BASE);
+ s390_aghi(p, STK_BASE, -(sz.stack_size+MINV_POS));
+ s390_stg (p, s390_r0, 0, STK_BASE, 0);
+ s390_la (p, s390_r8, 0, STK_BASE, 4);
+ s390_lgr (p, s390_r10, s390_r8);
+ s390_lghi(p, s390_r9, sz.stack_size+92);
+ s390_lghi(p, s390_r11, 0);
+ s390_mvcl(p, s390_r8, s390_r10);
+
+ /*----------------------------------------------------------*/
+ /* Let's fill MonoInvocation - first zero some fields */
+ /*----------------------------------------------------------*/
+ s390_lghi (p, s390_r0, 0);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
+ s390_lghi (p, s390_r0, 1);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, invoke_trap)));
+
+ /*----------------------------------------------------------*/
+ /* set method pointer */
+ /*----------------------------------------------------------*/
+ s390_bras (p, s390_r13, 4);
+ s390_llong(p, method);
+ s390_lg (p, s390_r0, 0, s390_r13, 0);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
+
+ local_start = local_pos = MINV_POS +
+ sizeof (MonoInvocation) + (sig->param_count + 1) * sizeof (stackval);
+ this_flag = (sig->hasthis ? 1 : 0);
+
+ /*----------------------------------------------------------*/
+	/* if we are returning a structure, check its length to     */
+ /* see if there's a "hidden" parameter that points to the */
+ /* area. If necessary save this hidden parameter for later */
+ /*----------------------------------------------------------*/
+ if (MONO_TYPE_ISSTRUCT(sig->ret)) {
+ if (sig->pinvoke)
+ retSize = mono_class_native_size (sig->ret->data.klass, &align);
+ else
+ retSize = mono_class_value_size (sig->ret->data.klass, &align);
+ switch(retSize) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ case 8:
+ sz.retStruct = 0;
+ break;
+ default:
+ sz.retStruct = 1;
+ s390_lgr(p, s390_r8, s390_r2);
+ reg_save = 1;
+ }
+ } else {
+ reg_save = 0;
+ }
+
+ if (this_flag) {
+ s390_stg (p, s390_r2 + reg_save, 0, STK_BASE,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ reg_param++;
+ } else {
+ s390_stg (p, s390_r2 + reg_save, 0, STK_BASE, local_pos);
+		local_pos += sizeof(long);	/* stg above stored 8 bytes */
+ s390_stg (p, s390_r0, 0, STK_BASE,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ }
+
+ s390_stmg (p, s390_r3 + reg_param, s390_r6, STK_BASE, local_pos);
+ local_pos += 4 * sizeof(long);
+ float_pos = local_pos;
+ s390_std (p, s390_f0, 0, STK_BASE, local_pos);
+ local_pos += sizeof(double);
+ s390_std (p, s390_f2, 0, STK_BASE, local_pos);
+ local_pos += sizeof(double);
+
+ /*----------------------------------------------------------*/
+ /* prepare space for valuetypes */
+ /*----------------------------------------------------------*/
+ vt_cur = local_pos;
+ vtbuf = alloca (sizeof(int)*sig->param_count);
+ cpos = 0;
+ for (i = 0; i < sig->param_count; i++) {
+ MonoType *type = sig->params [i];
+ vtbuf [i] = -1;
+ DEBUG(printf("par: %d type: %d ref: %d\n",i,type->type,type->byref));
+ if (type->type == MONO_TYPE_VALUETYPE) {
+ MonoClass *klass = type->data.klass;
+ gint size;
+
+ if (klass->enumtype)
+ continue;
+ size = mono_class_native_size (klass, &align);
+ cpos += align - 1;
+ cpos &= ~(align - 1);
+ vtbuf [i] = cpos;
+ cpos += size;
+ }
+ }
+ cpos += 3;
+ cpos &= ~3;
+
+ local_pos += cpos;
+
+ /*----------------------------------------------------------*/
+ /* set MonoInvocation::stack_args */
+ /*----------------------------------------------------------*/
+ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation);
+ s390_la (p, s390_r0, 0, STK_BASE, stackval_arg_pos);
+ s390_stg (p, s390_r0, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)));
+
+ /*----------------------------------------------------------*/
+ /* add stackval arguments */
+ /*----------------------------------------------------------*/
+ for (i = 0; i < sig->param_count; ++i) {
+ if (sig->params [i]->byref) {
+ ADD_ISTACK_PARM(0, 1);
+ } else {
+ simple_type = sig->params [i]->type;
+ enum_savechk:
+ switch (simple_type) {
+ case MONO_TYPE_I8:
+ ADD_ISTACK_PARM(-1, 2);
+ break;
+ case MONO_TYPE_R4:
+ ADD_RSTACK_PARM(1);
+ break;
+ case MONO_TYPE_R8:
+ ADD_RSTACK_PARM(2);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params [i]->data.klass->enumtype) {
+ simple_type = sig->params [i]->data.klass->enum_basetype->type;
+ goto enum_savechk;
+ }
+ if (sig->pinvoke)
+ parSize = mono_class_native_size (sig->params [i]->data.klass, &align);
+ else
+ parSize = mono_class_value_size (sig->params [i]->data.klass, &align);
+ switch(parSize) {
+ case 0:
+ case 1:
+ case 2:
+ case 4:
+ ADD_PSTACK_PARM(0, 1);
+ break;
+ case 8:
+ ADD_PSTACK_PARM(-1, 2);
+ break;
+ default:
+ ADD_TSTACK_PARM;
+ }
+ break;
+ default:
+ ADD_ISTACK_PARM(0, 1);
+ }
+ }
+
+ if (vtbuf [i] >= 0) {
+ s390_la (p, s390_r3, 0, STK_BASE, vt_cur);
+ s390_stg (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ vt_cur += vtbuf [i];
+ } else {
+ s390_la (p, s390_r3, 0, STK_BASE, stackval_arg_pos);
+ }
+
+ /*--------------------------------------*/
+ /* Load the parameter registers for the */
+ /* call to stackval_from_data */
+ /*--------------------------------------*/
+		s390_bras (p, s390_r13, 12);	/* branch over the three 8-byte literals */
+		s390_llong(p, sig->params [i]);
+		s390_llong(p, sig->pinvoke);
+		s390_llong(p, stackval_from_data);
+		s390_lg	  (p, s390_r2, 0, s390_r13, 0);
+		s390_lg	  (p, s390_r5, 0, s390_r13, 8);
+		s390_lg	  (p, s390_r1, 0, s390_r13, 16);
+ s390_basr (p, s390_r14, s390_r1);
+
+ stackval_arg_pos += sizeof(stackval);
+
+ /* fixme: alignment */
+ DEBUG (printf ("arg_pos %d --> ", arg_pos));
+ if (sig->pinvoke)
+ arg_pos += mono_type_native_stack_size (sig->params [i], &align);
+ else
+ arg_pos += mono_type_stack_size (sig->params [i], &align);
+
+ DEBUG (printf ("%d\n", stackval_arg_pos));
+ }
+
+ /*----------------------------------------------------------*/
+ /* Set return area pointer. */
+ /*----------------------------------------------------------*/
+ s390_la (p, s390_r10, 0, STK_BASE, stackval_arg_pos);
+ s390_stg(p, s390_r10, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) {
+ MonoClass *klass = sig->ret->data.klass;
+ if (!klass->enumtype) {
+ s390_la (p, s390_r9, 0, s390_r10, sizeof(stackval));
+			s390_stg (p, s390_r9, 0, STK_BASE, stackval_arg_pos);	/* store the full 8-byte pointer */
+ stackval_arg_pos += sizeof(stackval);
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* call ves_exec_method */
+ /*----------------------------------------------------------*/
+ s390_bras (p, s390_r13, 4);
+ s390_llong(p, ves_exec_method);
+ s390_lg (p, s390_r1, 0, s390_r13, 0);
+ s390_la (p, s390_r2, 0, STK_BASE, MINV_POS);
+ s390_basr (p, s390_r14, s390_r1);
+
+ /*----------------------------------------------------------*/
+ /* move retval from stackval to proper place (r3/r4/...) */
+ /*----------------------------------------------------------*/
+ DEBUG(printf("retType: %d byRef: %d\n",sig->ret->type,sig->ret->byref));
+ if (sig->ret->byref) {
+ DEBUG (printf ("ret by ref\n"));
+ s390_stg(p, s390_r2, 0, s390_r10, 0);
+ } else {
+ enum_retvalue:
+ switch (sig->ret->type) {
+ case MONO_TYPE_VOID:
+ break;
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_U1:
+ s390_lghi(p, s390_r2, 0);
+ s390_ic (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ s390_lh (p, s390_r2, 0,s390_r10, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ s390_lgf(p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_I8:
+ s390_lg (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_R4:
+ s390_le (p, s390_f0, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_R8:
+ s390_ld (p, s390_f0, 0, s390_r10, 0);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ /*---------------------------------*/
+ /* Call stackval_to_data to return */
+ /* the structure */
+ /*---------------------------------*/
+			s390_bras (p, s390_r13, 12);	/* branch over the three 8-byte literals */
+ s390_llong(p, sig->ret);
+ s390_llong(p, sig->pinvoke);
+ s390_llong(p, stackval_to_data);
+ s390_lg (p, s390_r2, 0, s390_r13, 0);
+ s390_lg (p, s390_r3, 0, STK_BASE, (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+ if (sz.retStruct) {
+ /*------------------------------------------*/
+ /* Get stackval_to_data to set result area */
+ /*------------------------------------------*/
+ s390_lgr (p, s390_r4, s390_r8);
+ } else {
+ /*------------------------------------------*/
+ /* Give stackval_to_data a temp result area */
+ /*------------------------------------------*/
+ s390_la (p, s390_r4, 0, STK_BASE, stackval_arg_pos);
+ }
+			s390_lg	   (p, s390_r5, 0, s390_r13, 8);
+			s390_lg	   (p, s390_r1, 0, s390_r13, 16);
+ s390_basr (p, s390_r14, s390_r1);
+ switch (retSize) {
+ case 0:
+ break;
+ case 1:
+ s390_lghi(p, s390_r2, 0);
+ s390_ic (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 2:
+ s390_lh (p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 4:
+ s390_lgf(p, s390_r2, 0, s390_r10, 0);
+ break;
+ case 8:
+ s390_lg (p, s390_r2, 0, s390_r10, 0);
+ break;
+ default: ;
+ /*-------------------------------------------------*/
+ /* stackval_to_data has placed data in result area */
+ /*-------------------------------------------------*/
+ }
+ break;
+ default:
+ g_error ("Type 0x%x not handled yet in thunk creation",
+ sig->ret->type);
+ break;
+ }
+ }
+
+ /*----------------------------------------------------------*/
+ /* epilog */
+ /*----------------------------------------------------------*/
+ s390_lg (p, STK_BASE, 0, STK_BASE, 0);
+ s390_lg (p, s390_r4, 0, STK_BASE, S390_RET_ADDR_OFFSET);
+ s390_lmg (p, s390_r6, STK_BASE, STK_BASE, S390_REG_SAVE_OFFSET);
+ s390_br (p, s390_r4);
+
+	DEBUG (printf ("emitted code size: %d\n", p - code_buffer));
+
+	DEBUG (printf ("Delegate [end emitting]\n"));
+
+ ji = g_new0 (MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = p - code_buffer;
+ ji->code_start = code_buffer;
+
+ mono_jit_info_table_add (mono_get_root_domain (), ji);
+
+ return ji->code_start;
+}
+
+/*========================= End of Function ========================*/
diff --git a/src/arch/sparc/.gitignore b/src/arch/sparc/.gitignore
new file mode 100644
index 0000000..dc1ebd2
--- /dev/null
+++ b/src/arch/sparc/.gitignore
@@ -0,0 +1,3 @@
+/Makefile
+/Makefile.in
+/.deps
diff --git a/src/arch/sparc/Makefile.am b/src/arch/sparc/Makefile.am
new file mode 100644
index 0000000..a888904
--- /dev/null
+++ b/src/arch/sparc/Makefile.am
@@ -0,0 +1,7 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-sparc.la
+
+libmonoarch_sparc_la_SOURCES = tramp.c sparc-codegen.h
+
diff --git a/src/arch/sparc/sparc-codegen.h b/src/arch/sparc/sparc-codegen.h
new file mode 100644
index 0000000..eb421bb
--- /dev/null
+++ b/src/arch/sparc/sparc-codegen.h
@@ -0,0 +1,955 @@
+#ifndef __SPARC_CODEGEN_H__
+#define __SPARC_CODEGEN_H__
+
+#if SIZEOF_VOID_P == 8
+#define SPARCV9 1
+#endif
+
+typedef enum {
+ sparc_r0 = 0,
+ sparc_r1 = 1,
+ sparc_r2 = 2,
+ sparc_r3 = 3,
+ sparc_r4 = 4,
+ sparc_r5 = 5,
+ sparc_r6 = 6,
+ sparc_r7 = 7,
+ sparc_r8 = 8,
+ sparc_r9 = 9,
+ sparc_r10 = 10,
+ sparc_r11 = 11,
+ sparc_r12 = 12,
+ sparc_r13 = 13,
+ sparc_r14 = 14,
+ sparc_r15 = 15,
+ sparc_r16 = 16,
+ sparc_r17 = 17,
+ sparc_r18 = 18,
+ sparc_r19 = 19,
+ sparc_r20 = 20,
+ sparc_r21 = 21,
+ sparc_r22 = 22,
+ sparc_r23 = 23,
+ sparc_r24 = 24,
+ sparc_r25 = 25,
+ sparc_r26 = 26,
+ sparc_r27 = 27,
+ sparc_r28 = 28,
+ sparc_r29 = 29,
+ sparc_r30 = 30,
+ sparc_r31 = 31,
+ /* aliases */
+ /* global registers */
+ sparc_g0 = 0, sparc_zero = 0,
+ sparc_g1 = 1,
+ sparc_g2 = 2,
+ sparc_g3 = 3,
+ sparc_g4 = 4,
+ sparc_g5 = 5,
+ sparc_g6 = 6,
+ sparc_g7 = 7,
+ /* out registers */
+ sparc_o0 = 8,
+ sparc_o1 = 9,
+ sparc_o2 = 10,
+ sparc_o3 = 11,
+ sparc_o4 = 12,
+ sparc_o5 = 13,
+ sparc_o6 = 14, sparc_sp = 14,
+ sparc_o7 = 15, sparc_callsite = 15,
+ /* local registers */
+ sparc_l0 = 16,
+ sparc_l1 = 17,
+ sparc_l2 = 18,
+ sparc_l3 = 19,
+ sparc_l4 = 20,
+ sparc_l5 = 21,
+ sparc_l6 = 22,
+ sparc_l7 = 23,
+ /* in registers */
+ sparc_i0 = 24,
+ sparc_i1 = 25,
+ sparc_i2 = 26,
+ sparc_i3 = 27,
+ sparc_i4 = 28,
+ sparc_i5 = 29,
+ sparc_i6 = 30, sparc_fp = 30,
+ sparc_i7 = 31,
+ sparc_nreg = 32,
+ /* floating point registers */
+ sparc_f0 = 0,
+ sparc_f1 = 1,
+ sparc_f2 = 2,
+ sparc_f3 = 3,
+ sparc_f4 = 4,
+ sparc_f5 = 5,
+ sparc_f6 = 6,
+ sparc_f7 = 7,
+ sparc_f8 = 8,
+ sparc_f9 = 9,
+ sparc_f10 = 10,
+ sparc_f11 = 11,
+ sparc_f12 = 12,
+ sparc_f13 = 13,
+ sparc_f14 = 14,
+ sparc_f15 = 15,
+ sparc_f16 = 16,
+ sparc_f17 = 17,
+ sparc_f18 = 18,
+ sparc_f19 = 19,
+ sparc_f20 = 20,
+ sparc_f21 = 21,
+ sparc_f22 = 22,
+ sparc_f23 = 23,
+ sparc_f24 = 24,
+ sparc_f25 = 25,
+ sparc_f26 = 26,
+ sparc_f27 = 27,
+ sparc_f28 = 28,
+ sparc_f29 = 29,
+ sparc_f30 = 30,
+ sparc_f31 = 31,
+	sparc_f31 = 31
+
+typedef enum {
+ sparc_bn = 0, sparc_bnever = 0,
+ sparc_be = 1,
+ sparc_ble = 2,
+ sparc_bl = 3,
+ sparc_bleu = 4,
+ sparc_bcs = 5, sparc_blu = 5,
+ sparc_bneg = 6,
+ sparc_bvs = 7, sparc_boverflow = 7,
+ sparc_ba = 8, sparc_balways = 8,
+ sparc_bne = 9,
+ sparc_bg = 10,
+ sparc_bge = 11,
+ sparc_bgu = 12,
+ sparc_bcc = 13, sparc_beu = 13,
+ sparc_bpos = 14,
+ sparc_bvc = 15
+} SparcCond;
+
+typedef enum {
+ /* with fcmp */
+ sparc_feq = 0,
+ sparc_fl = 1,
+ sparc_fg = 2,
+ sparc_unordered = 3,
+ /* branch ops */
+ sparc_fba = 8,
+ sparc_fbn = 0,
+ sparc_fbu = 7,
+ sparc_fbg = 6,
+ sparc_fbug = 5,
+ sparc_fbl = 4,
+ sparc_fbul = 3,
+ sparc_fblg = 2,
+ sparc_fbne = 1,
+ sparc_fbe = 9,
+ sparc_fbue = 10,
+ sparc_fbge = 11,
+ sparc_fbuge = 12,
+ sparc_fble = 13,
+ sparc_fbule = 14,
+ sparc_fbo = 15
+} SparcFCond;
+
+typedef enum {
+ sparc_icc = 4,
+ sparc_xcc = 6,
+ sparc_fcc0 = 0,
+ sparc_fcc1 = 1,
+ sparc_fcc2 = 2,
+ sparc_fcc3 = 3
+} SparcCC;
+
+typedef enum {
+ sparc_icc_short = 0,
+ sparc_xcc_short = 2
+} SparcCCShort;
+
+typedef enum {
+ /* fop1 format */
+ sparc_fitos_val = 196,
+ sparc_fitod_val = 200,
+ sparc_fitoq_val = 204,
+ sparc_fxtos_val = 132,
+ sparc_fxtod_val = 136,
+ sparc_fxtoq_val = 140,
+ sparc_fstoi_val = 209,
+ sparc_fdtoi_val = 210,
+ sparc_fqtoi_val = 211,
+ sparc_fstod_val = 201,
+ sparc_fstoq_val = 205,
+ sparc_fdtos_val = 198,
+ sparc_fdtoq_val = 206,
+ sparc_fqtos_val = 199,
+ sparc_fqtod_val = 203,
+ sparc_fmovs_val = 1,
+ sparc_fmovd_val = 2,
+ sparc_fnegs_val = 5,
+ sparc_fnegd_val = 6,
+ sparc_fabss_val = 9,
+ sparc_fabsd_val = 10,
+ sparc_fsqrts_val = 41,
+ sparc_fsqrtd_val = 42,
+ sparc_fsqrtq_val = 43,
+ sparc_fadds_val = 65,
+ sparc_faddd_val = 66,
+ sparc_faddq_val = 67,
+ sparc_fsubs_val = 69,
+ sparc_fsubd_val = 70,
+ sparc_fsubq_val = 71,
+ sparc_fmuls_val = 73,
+ sparc_fmuld_val = 74,
+ sparc_fmulq_val = 75,
+ sparc_fsmuld_val = 105,
+ sparc_fdmulq_val = 111,
+ sparc_fdivs_val = 77,
+ sparc_fdivd_val = 78,
+ sparc_fdivq_val = 79,
+ /* fop2 format */
+ sparc_fcmps_val = 81,
+ sparc_fcmpd_val = 82,
+ sparc_fcmpq_val = 83,
+ sparc_fcmpes_val = 85,
+ sparc_fcmped_val = 86,
+ sparc_fcmpeq_val = 87
+} SparcFOp;
+
+typedef enum {
+ sparc_membar_load_load = 0x1,
+ sparc_membar_store_load = 0x2,
+ sparc_membar_load_store = 0x4,
+ sparc_membar_store_store = 0x8,
+
+ sparc_membar_lookaside = 0x10,
+ sparc_membar_memissue = 0x20,
+ sparc_membar_sync = 0x40,
+
+ sparc_membar_all = 0x4f
+} SparcMembarFlags;
+
+typedef struct {
+ unsigned int op : 2; /* always 1 */
+ unsigned int disp : 30;
+} sparc_format1;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int rd : 5;
+ unsigned int op2 : 3;
+ unsigned int disp : 22;
+} sparc_format2a;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int cond : 4;
+ unsigned int op2 : 3;
+ unsigned int disp : 22;
+} sparc_format2b;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int cond : 4;
+ unsigned int op2 : 3;
+ unsigned int cc01 : 2;
+ unsigned int p : 1;
+ unsigned int d19 : 19;
+} sparc_format2c;
+
+typedef struct {
+ unsigned int op : 2; /* always 0 */
+ unsigned int a : 1;
+ unsigned int res : 1;
+ unsigned int rcond: 3;
+ unsigned int op2 : 3;
+ unsigned int d16hi: 2;
+ unsigned int p : 1;
+ unsigned int rs1 : 5;
+ unsigned int d16lo: 14;
+} sparc_format2d;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int asi : 8;
+ unsigned int rs2 : 5;
+} sparc_format3a;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int x : 1;
+ unsigned int asi : 7;
+ unsigned int rs2 : 5;
+} sparc_format3ax;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int imm : 13;
+} sparc_format3b;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int x : 1;
+ unsigned int imm : 12;
+} sparc_format3bx;
+
+typedef struct {
+ unsigned int op : 2; /* 2 or 3 */
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int opf : 9;
+ unsigned int rs2 : 5;
+} sparc_format3c;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int res : 6;
+ unsigned int rs2 : 5;
+} sparc_format4a;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int rs1 : 5;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int simm : 11;
+} sparc_format4b;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int cc2 : 1;
+ unsigned int cond : 4;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int res : 6;
+ unsigned int rs2 : 5;
+} sparc_format4c;
+
+typedef struct {
+ unsigned int op : 2;
+ unsigned int rd : 5;
+ unsigned int op3 : 6;
+ unsigned int cc2 : 1;
+ unsigned int cond : 4;
+ unsigned int i : 1;
+ unsigned int cc01 : 2;
+ unsigned int simm : 11;
+} sparc_format4d;
+
+/* for use in logical ops, use 0 to not set flags */
+#define sparc_cc 16
+
+#define sparc_is_imm13(val) ((glong)val >= (glong)-(1<<12) && (glong)val <= (glong)((1<<12)-1))
+#define sparc_is_imm22(val) ((glong)val >= (glong)-(1<<21) && (glong)val <= (glong)((1<<21)-1))
+#define sparc_is_imm16(val) ((glong)val >= (glong)-(1<<15) && (glong)val <= (glong)((1<<15)-1))
+#define sparc_is_imm19(val) ((glong)val >= (glong)-(1<<18) && (glong)val <= (glong)((1<<18)-1))
+#define sparc_is_imm30(val) ((glong)val >= (glong)-(1<<29) && (glong)val <= (glong)((1<<29)-1))
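+
+/* Editorial note - the usual emission pattern with these checks is
+ * to use the immediate instruction form when the constant fits and
+ * fall back to a scratch register otherwise, e.g. (sketch; `code' is
+ * the instruction pointer being emitted to):
+ *
+ *	if (sparc_is_imm13 (disp))
+ *		sparc_ld_imm (code, base, disp, dest);
+ *	else {
+ *		(materialize disp in %g1 - see the sethi note below)
+ *		sparc_ld (code, base, sparc_g1, dest);
+ *	}
+ */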
+
+/* disassembly */
+#define sparc_inst_op(inst) ((inst) >> 30)
+#define sparc_inst_op2(inst) (((inst) >> 22) & 0x7)
+#define sparc_inst_rd(inst) (((inst) >> 25) & 0x1f)
+#define sparc_inst_op3(inst) (((inst) >> 19) & 0x3f)
+#define sparc_inst_i(inst) (((inst) >> 13) & 0x1)
+#define sparc_inst_rs1(inst) (((inst) >> 14) & 0x1f)
+#define sparc_inst_rs2(inst) (((inst) >> 0) & 0x1f)
+#define sparc_inst_imm(inst) (((inst) >> 13) & 0x1)
+#define sparc_inst_imm13(inst) (((inst) >> 0) & 0x1fff)
+
+#define sparc_encode_call(ins,addr) \
+ do { \
+ sparc_format1 *__f = (sparc_format1*)(ins); \
+ __f->op = 1; \
+ __f->disp = ((unsigned int)(addr) >> 2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
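+
+/* Editorial note - the 30-bit field is a word displacement: the value
+ * passed in should already be the byte offset from the call site to
+ * the target (the macro only shifts it right by 2), as
+ * sparc_call_simple below assumes. */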
+
+#define sparc_encode_format2a(ins,val,oper,dest) \
+ do { \
+ sparc_format2a *__f = (sparc_format2a*)(ins); \
+ __f->op = 0; \
+ __f->rd = (dest); \
+ __f->op2 = (oper); \
+ __f->disp = (val) & 0x3fffff; \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2b(ins,aval,bcond,oper,disp22) \
+ do { \
+ sparc_format2b *__f = (sparc_format2b*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->cond = (bcond); \
+ __f->op2 = (oper); \
+ __f->disp = (disp22); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2c(ins,aval,bcond,oper,xcc,predict,disp19) \
+ do { \
+ sparc_format2c *__f = (sparc_format2c*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->cond = (bcond); \
+ __f->op2 = (oper); \
+ __f->cc01 = (xcc); \
+ __f->p = (predict); \
+ __f->d19 = (disp19); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format2d(ins,aval,bcond,oper,predict,r1,disp16) \
+ do { \
+ sparc_format2d *__f = (sparc_format2d*)(ins); \
+ __f->op = 0; \
+ __f->a = (aval); \
+ __f->res = 0; \
+ __f->rcond = (bcond); \
+ __f->op2 = (oper); \
+ __f->d16hi = ((disp16) >> 14); \
+ __f->p = (predict); \
+ __f->rs1 = (r1); \
+ __f->d16lo = ((disp16) & 0x3fff); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3a(ins,opval,asival,r1,r2,oper,dest) \
+ do { \
+ sparc_format3a *__f = (sparc_format3a*)(ins); \
+ __f->op = (opval); \
+ __f->asi = (asival); \
+ __f->i = 0; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3ax(ins,opval,asival,r1,r2,oper,dest) \
+ do { \
+ sparc_format3ax *__f = (sparc_format3ax*)(ins); \
+ __f->op = (opval); \
+ __f->asi = (asival); \
+ __f->i = 0; \
+ __f->x = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3b(ins,opval,r1,val,oper,dest) \
+ do { \
+ sparc_format3b *__f = (sparc_format3b*)(ins); \
+ __f->op = (opval); \
+ __f->imm = (val); \
+ __f->i = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3bx(ins,opval,r1,val,oper,dest) \
+ do { \
+ sparc_format3bx *__f = (sparc_format3bx*)(ins); \
+ __f->op = (opval); \
+ __f->imm = (val); \
+ __f->i = 1; \
+ __f->x = 1; \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format3c(ins,opval,opfval,r1,oper,r2,dest) \
+ do { \
+ sparc_format3c *__f = (sparc_format3c*)(ins); \
+ __f->op = (opval); \
+ __f->opf = (opfval); \
+ __f->rd = (dest); \
+ __f->rs1 = (r1); \
+ __f->rs2 = (r2); \
+ __f->op3 = (oper); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4a(ins,opval,oper,cc,r1,r2,dest) \
+ do { \
+ sparc_format4a *__f = (sparc_format4a*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->rs1 = (r1); \
+ __f->i = 0; \
+ __f->cc01= (cc) & 0x3; \
+ __f->res = 0; \
+ __f->rs2 = (r2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4b(ins,opval,oper,cc,r1,imm,dest) \
+ do { \
+ sparc_format4b *__f = (sparc_format4b*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->rs1 = (r1); \
+ __f->i = 1; \
+ __f->cc01= (cc) & 0x3; \
+ __f->simm = (imm); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4c(ins,opval,oper,xcc,bcond,r2,dest) \
+ do { \
+ sparc_format4c *__f = (sparc_format4c*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->cc2 = ((xcc) >> 2) & 0x1; \
+ __f->cond = bcond; \
+ __f->i = 0; \
+ __f->cc01= (xcc) & 0x3; \
+ __f->res = 0; \
+ __f->rs2 = (r2); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+#define sparc_encode_format4d(ins,opval,oper,xcc,bcond,imm,dest) \
+ do { \
+ sparc_format4d *__f = (sparc_format4d*)(ins); \
+ __f->op = (opval); \
+ __f->rd = (dest); \
+ __f->op3 = (oper); \
+ __f->cc2 = ((xcc) >> 2) & 0x1; \
+ __f->cond = bcond; \
+ __f->i = 1; \
+ __f->cc01= (xcc) & 0x3; \
+ __f->simm = (imm); \
+ (ins) = (unsigned int*)__f + 1; \
+ } while (0)
+
+/* is it useful to provide a non-default value? */
+#define sparc_asi 0x0
+
+/* load */
+#define sparc_ldsb(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),9,(dest))
+#define sparc_ldsb_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),9,(dest))
+
+#define sparc_ldsh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),10,(dest))
+#define sparc_ldsh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),10,(dest))
+
+#define sparc_ldub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),1,(dest))
+#define sparc_ldub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),1,(dest))
+
+#define sparc_lduh(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),2,(dest))
+#define sparc_lduh_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),2,(dest))
+
+#define sparc_ld(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),0,(dest))
+#define sparc_ld_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),0,(dest))
+
+/* Sparc V9 */
+#define sparc_ldx(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),11,(dest))
+#define sparc_ldx_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),11,(dest))
+
+#define sparc_ldsw(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),8,(dest))
+#define sparc_ldsw_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),8,(dest))
+
+#define sparc_ldd(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),3,(dest))
+#define sparc_ldd_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),3,(dest))
+
+#define sparc_ldf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),32,(dest))
+#define sparc_ldf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),32,(dest))
+
+#define sparc_lddf(ins,base,disp,dest) sparc_encode_format3a((ins),3,0,(base),(disp),35,(dest))
+#define sparc_lddf_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),35,(dest))
+
+/* store */
+#define sparc_stb(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),5,(src))
+#define sparc_stb_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),5,(src))
+
+#define sparc_sth(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),6,(src))
+#define sparc_sth_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),6,(src))
+
+#define sparc_st(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),4,(src))
+#define sparc_st_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),4,(src))
+
+/* Sparc V9 */
+#define sparc_stx(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),14,(src))
+#define sparc_stx_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),14,(src))
+
+#define sparc_std(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),7,(src))
+#define sparc_std_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),7,(src))
+
+#define sparc_stf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),36,(src))
+#define sparc_stf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),36,(src))
+
+#define sparc_stdf(ins,src,base,disp) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),39,(src))
+#define sparc_stdf_imm(ins,src,base,disp) sparc_encode_format3b((ins),3,(base),(disp),39,(src))
+
+/* swap */
+#define sparc_ldstub(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),13,(dest))
+#define sparc_ldstub_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),13,(dest))
+
+#define sparc_swap(ins,base,disp,dest) sparc_encode_format3a((ins),3,sparc_asi,(base),(disp),15,(dest))
+#define sparc_swap_imm(ins,base,disp,dest) sparc_encode_format3b((ins),3,(base),(disp),15,(dest))
+
+/* misc */
+/* note: with sethi val is the full 32 bit value (think of it as %hi(val)) */
+#define sparc_sethi(ins,val,dest) sparc_encode_format2a((ins),((val)>>10),4,(dest))
+
+#define sparc_nop(ins) sparc_sethi((ins),0,sparc_zero)
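+
+/* Editorial note - since sethi sets the upper 22 bits and clears the
+ * rest, an arbitrary 32-bit constant is materialized with the classic
+ * sethi/or pair (what the assembler's `set' pseudo-op expands to):
+ *
+ *	sparc_sethi  (code, val, dest);                        %hi(val)
+ *	sparc_or_imm (code, FALSE, dest, val & 0x3ff, dest);   %lo(val)
+ */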
+
+#define sparc_save(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),60,(dest))
+#define sparc_save_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),60,(dest))
+
+#define sparc_restore(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),61,(dest))
+#define sparc_restore_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),61,(dest))
+
+#define sparc_rett(ins,src,disp) sparc_encode_format3a((ins),2,0,(src),(disp),0x39,0)
+#define sparc_rett_imm(ins,src,disp) sparc_encode_format3b((ins),2,(src),(disp),0x39,0)
+
+#define sparc_jmpl(ins,base,disp,dest) sparc_encode_format3a((ins),2,0,(base),(disp),56,(dest))
+#define sparc_jmpl_imm(ins,base,disp,dest) sparc_encode_format3b((ins),2,(base),(disp),56,(dest))
+
+#define sparc_call_simple(ins,disp) sparc_encode_call((ins),((unsigned int)(disp)))
+
+#define sparc_rdy(ins,dest) sparc_encode_format3a((ins),2,0,0,0,40,(dest))
+
+#define sparc_wry(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),48,0)
+#define sparc_wry_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),48,0)
+
+/* stbar, unimp, flush */
+#define sparc_stbar(ins) sparc_encode_format3a((ins),2,0,15,0,40,0)
+#define sparc_unimp(ins,val) sparc_encode_format2b((ins),0,0,0,(val))
+
+#define sparc_flush(ins,base,disp) sparc_encode_format3a((ins),2,0,(base),(disp),59,0)
+#define sparc_flush_imm(ins,base,disp) sparc_encode_format3b((ins),2,(base),(disp),59,0)
+
+#define sparc_flushw(ins) sparc_encode_format3a((ins),2,0,0,0,43,0)
+
+#define sparc_membar(ins,flags) sparc_encode_format3b ((ins), 2, 0xf, (flags), 0x28, 0)
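+
+/* Editorial note - e.g. sparc_membar (code, sparc_membar_store_load)
+ * emits the StoreLoad barrier, the only ordering TSO does not already
+ * guarantee. */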
+
+/* trap */
+
+#define sparc_ta(ins,tt) sparc_encode_format3b((ins),2,0,(tt),58,0x8)
+
+/* alu fop */
+/* provide wrappers for: fitos, fitod, fstoi, fdtoi, fstod, fdtos, fmov, fneg, fabs */
+
+#define sparc_fop(ins,r1,op,r2,dest) sparc_encode_format3c((ins),2,(op),(r1),52,(r2),(dest))
+#define sparc_fcmp(ins,r1,op,r2) sparc_encode_format3c((ins),2,(op),(r1),53,(r2),0)
+
+/* format 1 fops */
+#define sparc_fadds(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fadds_val, r2, dest )
+#define sparc_faddd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddd_val, r2, dest )
+#define sparc_faddq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_faddq_val, r2, dest )
+
+#define sparc_fsubs(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubs_val, r2, dest )
+#define sparc_fsubd(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubd_val, r2, dest )
+#define sparc_fsubq(ins, r1, r2, dest) sparc_fop( ins, r1, sparc_fsubq_val, r2, dest )
+
+#define sparc_fmuls( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuls_val, r2, dest )
+#define sparc_fmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmuld_val, r2, dest )
+#define sparc_fmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fmulq_val, r2, dest )
+
+#define sparc_fsmuld( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fsmuld_val, r2, dest )
+#define sparc_fdmulq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdmulq_val, r2, dest )
+
+#define sparc_fdivs( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivs_val, r2, dest )
+#define sparc_fdivd( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivd_val, r2, dest )
+#define sparc_fdivq( ins, r1, r2, dest ) sparc_fop( ins, r1, sparc_fdivq_val, r2, dest )
+
+#define sparc_fitos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitos_val, r2, dest )
+#define sparc_fitod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitod_val, r2, dest )
+#define sparc_fitoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fitoq_val, r2, dest )
+
+#define sparc_fxtos( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtos_val, r2, dest )
+#define sparc_fxtod( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtod_val, r2, dest )
+#define sparc_fxtoq( ins, r2, dest) sparc_fop( ins, 0, sparc_fxtoq_val, r2, dest )
+
+#define sparc_fstoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoi_val, r2, dest )
+#define sparc_fdtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoi_val, r2, dest )
+#define sparc_fqtoi( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtoi_val, r2, dest )
+
+#define sparc_fstod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstod_val, r2, dest )
+#define sparc_fstoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fstoq_val, r2, dest )
+
+#define sparc_fdtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtos_val, r2, dest )
+#define sparc_fdtoq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fdtoq_val, r2, dest )
+
+#define sparc_fqtos( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtos_val, r2, dest )
+#define sparc_fqtod( ins, r2, dest ) sparc_fop( ins, 0, sparc_fqtod_val, r2, dest )
+
+#define sparc_fmovs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fmovs_val, r2, dest )
+#define sparc_fnegs( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegs_val, r2, dest )
+#define sparc_fabss( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabss_val, r2, dest )
+
+#define sparc_fmovd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fmovd_val, r2, dest )
+#define sparc_fnegd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fnegd_val, r2, dest )
+#define sparc_fabsd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fabsd_val, r2, dest )
+
+#define sparc_fsqrts( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrts_val, r2, dest )
+#define sparc_fsqrtd( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtd_val, r2, dest )
+#define sparc_fsqrtq( ins, r2, dest ) sparc_fop( ins, 0, sparc_fsqrtq_val, r2, dest )
+
+/* format 2 fops */
+
+#define sparc_fcmps( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmps_val, r2 )
+#define sparc_fcmpd( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpd_val, r2 )
+#define sparc_fcmpq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpq_val, r2 )
+#define sparc_fcmpes( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpes_val, r2 )
+#define sparc_fcmped( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmped_val, r2 )
+#define sparc_fcmpeq( ins, r1, r2 ) sparc_fcmp( ins, r1, sparc_fcmpeq_val, r2 )
+
+/* logical */
+
+/* FIXME: condense this using macros */
+/* FIXME: the setcc stuff is wrong in lots of places */
+
+#define sparc_logic(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),((setcc) ? 0x10 : 0) | (op), (dest))
+#define sparc_logic_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),((setcc) ? 0x10 : 0) | (op), (dest))
+
+#define sparc_and(ins,setcc,r1,r2,dest) sparc_logic(ins,1,setcc,r1,r2,dest)
+#define sparc_and_imm(ins,setcc,r1,imm,dest) sparc_logic_imm(ins,1,setcc,r1,imm,dest)
+
+#define sparc_andn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|5,(dest))
+#define sparc_andn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|5,(dest))
+
+#define sparc_or(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|2,(dest))
+#define sparc_or_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|2,(dest))
+
+#define sparc_orn(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|6,(dest))
+#define sparc_orn_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|6,(dest))
+
+#define sparc_xor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|3,(dest))
+#define sparc_xor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm), (setcc)|3,(dest))
+
+#define sparc_xnor(ins,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),(setcc)|7,(dest))
+#define sparc_xnor_imm(ins,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),(setcc)|7,(dest))
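+
+/* Note on the FIXME above: sparc_logic/sparc_logic_imm treat setcc as a
+ * boolean and fold in the 0x10 "cc" opcode bit themselves, while the
+ * andn/or/orn/xor/xnor macros above OR the caller's value straight into
+ * the opcode, so their callers must pass the 0x10 bit itself (as the test
+ * code does with sparc_cc) rather than TRUE. A minimal illustration,
+ * assuming sparc_cc carries that bit:
+ *
+ *   sparc_or (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);  // orcc %l0,%l1,%o1
+ *   sparc_or (p, FALSE,    sparc_l0, sparc_l1, sparc_o1);  // or   %l0,%l1,%o1
+ */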
+
+/* shift */
+#define sparc_sll(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),37,(dest))
+#define sparc_sll_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),37,(dest))
+
+/* Sparc V9 */
+#define sparc_sllx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),37,(dest))
+#define sparc_sllx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),37,(dest))
+
+#define sparc_srl(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),38,(dest))
+#define sparc_srl_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),38,(dest))
+
+/* Sparc V9 */
+#define sparc_srlx(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),38,(dest))
+#define sparc_srlx_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),38,(dest))
+
+#define sparc_sra(ins,src,disp,dest) sparc_encode_format3a((ins),2,0,(src),(disp),39,(dest))
+#define sparc_sra_imm(ins,src,disp,dest) sparc_encode_format3b((ins),2,(src),(disp),39,(dest))
+
+/* Sparc V9 */
+#define sparc_srax(ins,src,disp,dest) sparc_encode_format3ax((ins),2,0,(src),(disp),39,(dest))
+#define sparc_srax_imm(ins,src,disp,dest) sparc_encode_format3bx((ins),2,(src),(disp),39,(dest))
+
+/* alu */
+
+#define sparc_alu_reg(ins,op,setcc,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),op|((setcc) ? 0x10 : 0),(dest))
+#define sparc_alu_imm(ins,op,setcc,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),op|((setcc) ? 0x10 : 0),(dest))
+
+#define sparc_add(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0,(setcc),(r1),(r2),(dest))
+#define sparc_add_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0,(setcc),(r1),(imm),(dest))
+
+#define sparc_addx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x8,(setcc),(r1),(r2),(dest))
+#define sparc_addx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x8,(setcc),(r1),(imm),(dest))
+
+#define sparc_sub(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0x4,(setcc),(r1),(r2),(dest))
+#define sparc_sub_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0x4,(setcc),(r1),(imm),(dest))
+
+#define sparc_subx(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xc,(setcc),(r1),(r2),(dest))
+#define sparc_subx_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xc,(setcc),(r1),(imm),(dest))
+
+#define sparc_muls(ins,r1,r2,dest) sparc_encode_format3a((ins),2,0,(r1),(r2),36,(dest))
+#define sparc_muls_imm(ins,r1,imm,dest) sparc_encode_format3b((ins),2,(r1),(imm),36,(dest))
+
+#define sparc_umul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xa,(setcc),(r1),(r2),(dest))
+#define sparc_umul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xa,(setcc),(r1),(imm),(dest))
+
+#define sparc_smul(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xb,(setcc),(r1),(r2),(dest))
+#define sparc_smul_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xb,(setcc),(r1),(imm),(dest))
+
+#define sparc_udiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xe,(setcc),(r1),(r2),(dest))
+#define sparc_udiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xe,(setcc),(r1),(imm),(dest))
+
+#define sparc_sdiv(ins,setcc,r1,r2,dest) sparc_alu_reg((ins),0xf,(setcc),(r1),(r2),(dest))
+#define sparc_sdiv_imm(ins,setcc,r1,imm,dest) sparc_alu_imm((ins),0xf,(setcc),(r1),(imm),(dest))
+
+
+/* branch */
+#define sparc_branch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),2,(displ))
+/* FIXME: float condition codes are different: unify. */
+#define sparc_fbranch(ins,aval,condval,displ) sparc_encode_format2b((ins),(aval),(condval),6,(displ))
+#define sparc_branchp(ins,aval,condval,xcc,predict,displ) sparc_encode_format2c((ins),(aval),(condval),0x1,(xcc),(predict),(displ))
+
+#define sparc_brz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x1,0x3,(predict),(rs1),(disp))
+#define sparc_brlez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x2,0x3,(predict),(rs1),(disp))
+#define sparc_brlz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x3,0x3,(predict),(rs1),(disp))
+#define sparc_brnz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x5,0x3,(predict),(rs1),(disp))
+#define sparc_brgz(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x6,0x3,(predict),(rs1),(disp))
+#define sparc_brgez(ins,aval,predict,rs1,disp) sparc_encode_format2d((ins), (aval),0x7,0x3,(predict),(rs1),(disp))
+
+/* conditional moves */
+#define sparc_movcc(ins,cc,condval,r1,dest) sparc_encode_format4c((ins), 0x2, 0x2c, cc, condval, r1, dest)
+
+#define sparc_movcc_imm(ins,cc,condval,imm,dest) sparc_encode_format4d((ins), 0x2, 0x2c, cc, condval, imm, dest)
+
+/* synthetic instructions */
+#define sparc_cmp(ins,r1,r2) sparc_sub((ins),sparc_cc,(r1),(r2),sparc_g0)
+#define sparc_cmp_imm(ins,r1,imm) sparc_sub_imm((ins),sparc_cc,(r1),(imm),sparc_g0)
+#define sparc_jmp(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_g0)
+#define sparc_jmp_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_g0)
+#define sparc_call(ins,base,disp) sparc_jmpl((ins),(base),(disp),sparc_o7)
+#define sparc_call_imm(ins,base,disp) sparc_jmpl_imm((ins),(base),(disp),sparc_o7)
+
+#define sparc_test(ins,reg) sparc_or ((ins),sparc_cc,sparc_g0,(reg),sparc_g0)
+
+#define sparc_ret(ins) sparc_jmpl_imm((ins),sparc_i7,8,sparc_g0)
+#define sparc_retl(ins) sparc_jmpl_imm((ins),sparc_o7,8,sparc_g0)
+#define sparc_restore_simple(ins) sparc_restore((ins),sparc_g0,sparc_g0,sparc_g0)
+#define sparc_rett_simple(ins) sparc_rett_imm((ins),sparc_i7,8)
+
+#define sparc_set32(ins,val,reg) \
+ do { \
+ if ((val) == 0) \
+ sparc_clr_reg((ins),(reg)); \
+ else if (((guint32)(val) & 0x3ff) == 0) \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \
+ else { \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \
+ } \
+ } while (0)
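+
+/* A minimal illustration of what sparc_set32 emits (values assumed, not
+ * part of the original header):
+ *
+ *   sparc_set32 (p, 0x12345678, sparc_l0);
+ *
+ * produces the classic two-instruction sequence
+ *
+ *   sethi %hi(0x12345678), %l0   ! upper 22 bits
+ *   or    %l0, 0x278, %l0        ! lower 10 bits
+ *
+ * while small signed constants collapse to a single or %g0,imm,reg and
+ * constants with zero low bits to a lone sethi.
+ */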
+
+#ifdef SPARCV9
+#define SPARC_SET_MAX_SIZE (6 * 4)
+#else
+#define SPARC_SET_MAX_SIZE (2 * 4)
+#endif
+
+#if SPARCV9
+#define sparc_set(ins,ptr,reg) \
+ do { \
+ g_assert ((reg) != sparc_g1); \
+ gint64 val = (gint64)(ptr); \
+ guint32 top_word = (val) >> 32; \
+ guint32 bottom_word = (val) & 0xffffffff; \
+ if (val == 0) \
+ sparc_clr_reg ((ins), reg); \
+ else if ((val >= -4096) && ((val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,bottom_word,(reg)); \
+ else if ((val >= 0) && (val <= 4294967295L)) { \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ if (bottom_word & 0x3ff) \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ } \
+ else if ((val >= 0) && (val <= (1L << 44) - 1)) { \
+ sparc_sethi ((ins), (val >> 12), (reg)); \
+ sparc_or_imm ((ins), FALSE, (reg), (val >> 12) & 0x3ff, (reg)); \
+ sparc_sllx_imm ((ins),(reg), 12, (reg)); \
+ sparc_or_imm ((ins), FALSE, (reg), (val) & 0xfff, (reg)); \
+ } \
+ else if (top_word == 0xffffffff) { \
+ sparc_xnor ((ins), FALSE, sparc_g0, sparc_g0, sparc_g1); \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \
+ } \
+ else { \
+ sparc_sethi((ins),top_word,sparc_g1); \
+ sparc_sethi((ins),bottom_word,(reg)); \
+ sparc_or_imm((ins),FALSE,sparc_g1,top_word&0x3ff,sparc_g1); \
+ sparc_or_imm((ins),FALSE,(reg),bottom_word&0x3ff,(reg)); \
+ sparc_sllx_imm((ins),sparc_g1,32,sparc_g1); \
+ sparc_or((ins),FALSE,(reg),sparc_g1,(reg)); \
+ } \
+ } while (0)
+#else
+#define sparc_set(ins,val,reg) \
+ do { \
+ if ((val) == 0) \
+ sparc_clr_reg((ins),(reg)); \
+ else if (((guint32)(val) & 0x3ff) == 0) \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ else if (((gint32)(val) >= -4096) && ((gint32)(val) <= 4095)) \
+ sparc_or_imm((ins),FALSE,sparc_g0,(gint32)(val),(reg)); \
+ else { \
+ sparc_sethi((ins),(guint32)(val),(reg)); \
+ sparc_or_imm((ins),FALSE,(reg),(guint32)(val)&0x3ff,(reg)); \
+ } \
+ } while (0)
+#endif
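+
+/* On V9 the worst case of sparc_set builds a 64-bit constant from a
+ * sethi/or pair per 32-bit half plus a shift and a final merge:
+ *
+ *   sethi %hi(hi32), %g1   or %g1, %lo(hi32), %g1
+ *   sethi %hi(lo32), reg   or reg, %lo(lo32), reg
+ *   sllx  %g1, 32, %g1     or reg, %g1, reg
+ *
+ * which is where the six-instruction SPARC_SET_MAX_SIZE comes from, and
+ * why sparc_set scratches %g1 and asserts reg != sparc_g1.
+ */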
+
+#define sparc_set_ptr(ins,val,reg) sparc_set(ins,val,reg)
+
+#ifdef SPARCV9
+#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff7fffffff, reg)
+#else
+#define sparc_set_template(ins,reg) sparc_set (ins,0x7fffffff, reg)
+#endif
+
+#define sparc_not(ins,reg) sparc_xnor((ins),FALSE,(reg),sparc_g0,(reg))
+#define sparc_neg(ins,reg) sparc_sub((ins),FALSE,sparc_g0,(reg),(reg))
+#define sparc_clr_reg(ins,reg) sparc_or((ins),FALSE,sparc_g0,sparc_g0,(reg))
+
+#define sparc_mov_reg_reg(ins,src,dest) sparc_or((ins),FALSE,sparc_g0,(src),(dest))
+
+#ifdef SPARCV9
+#define sparc_sti_imm sparc_stx_imm
+#define sparc_ldi_imm sparc_ldx_imm
+#define sparc_sti sparc_stx
+#define sparc_ldi sparc_ldx
+#else
+#define sparc_sti_imm sparc_st_imm
+#define sparc_ldi_imm sparc_ld_imm
+#define sparc_sti sparc_st
+#define sparc_ldi sparc_ld
+#endif
+
+#endif /* __SPARC_CODEGEN_H__ */
+
diff --git a/src/arch/sparc/test.c b/src/arch/sparc/test.c
new file mode 100644
index 0000000..0d4ad18
--- /dev/null
+++ b/src/arch/sparc/test.c
@@ -0,0 +1,123 @@
+#include <stdio.h>
+#include <glib.h>
+#include "sparc-codegen.h"
+
+/* don't run the resulting program, it will destroy your computer,
+ * just objdump -d it to check that we generated the correct assembly.
+ */
+
+int
+main ()
+{
+ guint32 *p;
+ guint32 code_buffer [500];
+ guint32 stack_size = 0;
+ guint32 arg_pos;
+ unsigned char *ins;
+ int i, cur_out_reg, size;
+
+ p = code_buffer;
+
+ printf (".text\n.align 4\n.globl main\n.type main,@function\nmain:\n");
+
+ /*
+ * Standard function prolog.
+ */
+ sparc_save_imm (p, sparc_sp, -112-stack_size, sparc_sp);
+ cur_out_reg = sparc_o0;
+ arg_pos = 0;
+
+ if (1) {
+ sparc_mov_reg_reg (p, sparc_i2, cur_out_reg);
+ ++cur_out_reg;
+ }
+
+ sparc_ld_imm (p, sparc_i3, arg_pos, cur_out_reg);
+ ++cur_out_reg;
+ sparc_ld_imm (p, sparc_i3, arg_pos+4, cur_out_reg);
+ ++cur_out_reg;
+ /*
+ * Insert call to function
+ */
+ sparc_jmpl (p, sparc_i0, 0, sparc_callsite);
+ sparc_nop (p);
+
+ sparc_jmpl_imm (p, sparc_i7, 8, sparc_zero);
+ sparc_restore (p, sparc_zero, sparc_zero, sparc_zero);
+
+ sparc_ldsb (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldsb_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldsh (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldsh_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldub (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldub_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_lduh (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_lduh_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_ldf (p, sparc_i3, sparc_l0, sparc_o5);
+ sparc_ldf_imm (p, sparc_i3, 2, sparc_o5);
+
+ sparc_stb (p, sparc_i3, sparc_l0, sparc_l2);
+ sparc_stb_imm (p, sparc_i3, sparc_o5, 2);
+
+ sparc_sethi (p, 0xff000000, sparc_o2);
+ sparc_rdy (p, sparc_l0);
+ sparc_wry (p, sparc_l0, sparc_l1);
+ sparc_wry_imm (p, sparc_l0, 16);
+ sparc_stbar (p);
+ sparc_unimp (p, 24);
+ sparc_flush (p, sparc_l4, 0);
+
+ sparc_and (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_and_imm (p, FALSE, sparc_l0, 0xff, sparc_o1);
+ sparc_andn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_or (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_orn (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_xor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_xnor (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_sll (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sll_imm (p, sparc_l0, 2, sparc_o1);
+ sparc_srl (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_srl_imm (p, sparc_l0, 2, sparc_o1);
+ sparc_sra (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sra_imm (p, sparc_l0, 2, sparc_o1);
+
+ sparc_add (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_add_imm (p, FALSE, sparc_l0, 0xff, sparc_o1);
+ sparc_addx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sub (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_subx (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_muls (p, sparc_l0, sparc_l1, sparc_o1);
+ sparc_umul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_smul (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_udiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+ sparc_sdiv (p, sparc_cc, sparc_l0, sparc_l1, sparc_o1);
+
+ sparc_branch (p, FALSE, sparc_bne, -12);
+ sparc_ret (p);
+ sparc_retl (p);
+ sparc_test (p, sparc_l4);
+ sparc_cmp (p, sparc_l4, sparc_l6);
+ sparc_cmp_imm (p, sparc_l4, 4);
+ sparc_restore_simple (p);
+
+ sparc_set (p, 0xff000000, sparc_l7);
+ sparc_set (p, 1, sparc_l7);
+ sparc_set (p, 0xff0000ff, sparc_l7);
+
+ sparc_not (p, sparc_g2);
+ sparc_neg (p, sparc_g3);
+ sparc_clr_reg (p, sparc_g4);
+
+
+ size = (p-code_buffer)*4;
+ ins = (unsigned char *)code_buffer;
+ for (i = 0; i < size; ++i)
+ printf (".byte %d\n", (unsigned int) ins [i]);
+ return 0;
+}
+
diff --git a/src/arch/sparc/tramp.c b/src/arch/sparc/tramp.c
new file mode 100644
index 0000000..19c0a78
--- /dev/null
+++ b/src/arch/sparc/tramp.c
@@ -0,0 +1,1080 @@
+/* -*- Mode: C; tab-width: 8; indent-tabs-mode: t; c-basic-offset: 8 -*- */
+/*
+ * Create trampolines to invoke arbitrary functions.
+ *
+ * Copyright (C) Ximian Inc.
+ *
+ * Authors: Paolo Molaro (lupus@ximian.com)
+ * Jeffrey Stedfast <fejj@ximian.com>
+ * Mark Crichton <crichton@gimp.org>
+ *
+ */
+
+#include "config.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "sparc-codegen.h"
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+#include "mono/metadata/debug-helpers.h"
+#include "mono/metadata/marshal.h"
+
+
+#define ARG_SIZE sizeof (stackval)
+#define PROLOG_INS 1
+#define CALL_INS 3 /* max 3: 1 for the jmpl, 1 for the nop, and 1 for the possible unimp */
+#define EPILOG_INS 2
+#define FLOAT_REGS 32
+#define OUT_REGS 6
+#define LOCAL_REGS 8
+#define SLOT_SIZE sizeof(gpointer)
+#if SPARCV9
+#define MINIMAL_STACK_SIZE 22
+#define BIAS 2047
+#define FRAME_ALIGN 16
+#else
+#define MINIMAL_STACK_SIZE 23
+#define BIAS 0
+#define FRAME_ALIGN 8
+#endif
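+
+/* BIAS is the SPARC V9 ABI stack bias: on 64-bit SPARC, %sp and %fp point
+ * 2047 bytes below the real frame, so the frame offsets computed below
+ * have to add it back in.
+ */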
+
+#define NOT_IMPL(x) g_error("FIXME: %s", x);
+/*#define DEBUG(a) a*/
+#define DEBUG(a)
+
+/* Some assembly... */
+#ifdef __GNUC__
+#define flushi(addr) __asm__ __volatile__ ("flush %0"::"r"(addr):"memory")
+#else
+static void flushi(void *addr)
+{
+ asm("flush %i0");
+}
+#endif
+
+static char*
+sig_to_name (MonoMethodSignature *sig, const char *prefix)
+{
+ int i;
+ char *result;
+ GString *res = g_string_new ("");
+ char *p;
+
+ if (prefix) {
+ g_string_append (res, prefix);
+ g_string_append_c (res, '_');
+ }
+
+ mono_type_get_desc (res, sig->ret, TRUE);
+
+ for (i = 0; i < sig->param_count; ++i) {
+ g_string_append_c (res, '_');
+ mono_type_get_desc (res, sig->params [i], TRUE);
+ }
+ result = res->str;
+ p = result;
+ /* remove chars Sun's assembler doesn't like */
+ while (*p != '\0') {
+ if (*p == '.' || *p == '/')
+ *p = '_';
+ else if (*p == '&')
+ *p = '$';
+ else if (*p == '[' || *p == ']')
+ *p = 'X';
+ p++;
+ }
+ g_string_free (res, FALSE);
+ return result;
+}
+
+static void
+sparc_disassemble_code (guint32 *code_buffer, guint32 *p, const char *id)
+{
+ guchar *cp;
+ FILE *ofd;
+
+ if (!(ofd = fopen ("/tmp/test.s", "w")))
+ g_assert_not_reached();
+
+ fprintf (ofd, "%s:\n", id);
+
+ for (cp = (guchar *)code_buffer; cp < (guchar *)p; cp++)
+ fprintf (ofd, ".byte %d\n", *cp);
+
+ fclose (ofd);
+
+#ifdef __GNUC__
+ system ("as /tmp/test.s -o /tmp/test.o;objdump -d /tmp/test.o");
+#else
+ /* this assumes we are using Sun tools as we aren't GCC */
+#if SPARCV9
+ system ("as -xarch=v9 /tmp/test.s -o /tmp/test.o;dis /tmp/test.o");
+#else
+ system ("as /tmp/test.s -o /tmp/test.o;dis /tmp/test.o");
+#endif
+#endif
+}
+
+
+static void
+add_general (guint *gr, guint *stack_size, guint *code_size, gboolean simple)
+{
+ if (simple) {
+ if (*gr >= OUT_REGS) {
+ *stack_size += SLOT_SIZE;
+ *code_size += 12;
+ } else {
+ *code_size += 4;
+ }
+ } else {
+ if (*gr >= OUT_REGS - 1) {
+ *stack_size += 8 + (*stack_size % 8); /* ???64 */
+ *code_size += 16;
+ } else {
+ *code_size += 16;
+ }
+ (*gr)++;
+ }
+ (*gr)++;
+}
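+
+/* add_general cost model: a "simple" argument takes one out register
+ * (4 bytes of code) or, once the six out registers are used up, one stack
+ * slot plus a 12-byte spill sequence; a 64-bit argument (simple == FALSE)
+ * takes two register/stack slots, hence the double increment of *gr.
+ */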
+
+static void
+calculate_sizes (MonoMethodSignature *sig, guint *stack_size, guint *code_size,
+ gboolean string_ctor, gboolean *use_memcpy)
+{
+ guint i, fr, gr;
+ guint32 simpletype;
+
+ fr = gr = 0;
+ *stack_size = MINIMAL_STACK_SIZE * SLOT_SIZE;
+ *code_size = (PROLOG_INS + CALL_INS + EPILOG_INS) * 4;
+
+ /* function arguments */
+ if (sig->hasthis)
+ add_general (&gr, stack_size, code_size, TRUE);
+
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref) {
+ add_general (&gr, stack_size, code_size, TRUE);
+ continue;
+ }
+ simpletype = sig->params[i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_R4:
+#if SPARCV9
+ (*code_size) += 4; /* for the fdtos */
+#else
+ (*code_size) += 12;
+ (*stack_size) += 4;
+#endif
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ add_general (&gr, stack_size, code_size, TRUE);
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ guint32 align;
+ if (sig->params[i]->data.klass->enumtype) {
+ simpletype = sig->params[i]->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ size = mono_class_native_size (sig->params[i]->data.klass, &align);
+#if SPARCV9
+ if (size != 4) {
+#else
+ if (1) {
+#endif
+ DEBUG(fprintf(stderr, "copy %d byte struct on stack\n", size));
+ *use_memcpy = TRUE;
+ *code_size += 8*4;
+
+ *stack_size = (*stack_size + (align - 1)) & (~(align -1));
+ *stack_size += (size + 3) & (~3);
+ if (gr > OUT_REGS) {
+ *code_size += 4;
+ *stack_size += 4;
+ }
+ } else {
+ add_general (&gr, stack_size, code_size, TRUE);
+#if SPARCV9
+ *code_size += 8;
+#else
+ *code_size += 4;
+#endif
+ }
+ break;
+ }
+ case MONO_TYPE_I8:
+ case MONO_TYPE_R8:
+ add_general (&gr, stack_size, code_size, FALSE);
+ break;
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params[i]->type);
+ }
+ }
+
+ /* function return value */
+ if (sig->ret->byref || string_ctor) {
+ *code_size += 8;
+ } else {
+ simpletype = sig->ret->type;
+ enum_retvalue:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_R8:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ *code_size += 8;
+ break;
+ case MONO_TYPE_I8:
+ *code_size += 12;
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+#if SPARCV9
+ if (size <= 32)
+ *code_size += 8 + (size + 7) / 2;
+ else
+ *code_size += 8;
+#else
+ *code_size += 8;
+#endif
+ break;
+ }
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+
+ if (*use_memcpy) {
+ *stack_size += 8;
+ *code_size += 24;
+ if (sig->hasthis) {
+ *stack_size += SLOT_SIZE;
+ *code_size += 4;
+ }
+ }
+
+ *stack_size = (*stack_size + (FRAME_ALIGN - 1)) & (~(FRAME_ALIGN -1));
+}
+
+static inline guint32 *
+emit_epilog (guint32 *p, MonoMethodSignature *sig, guint stack_size)
+{
+ int ret_offset = 8;
+
+ /*
+ * Standard epilog.
+ * 8 may be 12 when returning structures (to skip unimp opcode).
+ */
+#if !SPARCV9
+ if (sig != NULL && !sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype)
+ ret_offset = 12;
+#endif
+ sparc_jmpl_imm (p, sparc_i7, ret_offset, sparc_zero);
+ sparc_restore (p, sparc_zero, sparc_zero, sparc_zero);
+
+ return p;
+}
+
+static inline guint32 *
+emit_prolog (guint32 *p, MonoMethodSignature *sig, guint stack_size)
+{
+ /* yes kids, it is this simple! */
+ sparc_save_imm (p, sparc_sp, -stack_size, sparc_sp);
+ return p;
+}
+
+#if SPARCV9
+#define sparc_st_ptr(a,b,c,d) sparc_stx(a,b,c,d)
+#define sparc_st_imm_ptr(a,b,c,d) sparc_stx_imm(a,b,c,d)
+#define sparc_ld_ptr(a,b,c,d) sparc_ldx(a,b,c,d)
+#define sparc_ld_imm_ptr(a,b,c,d) sparc_ldx_imm(a,b,c,d)
+#else
+#define sparc_st_ptr(a,b,c,d) sparc_st(a,b,c,d)
+#define sparc_st_imm_ptr(a,b,c,d) sparc_st_imm(a,b,c,d)
+#define sparc_ld_ptr(a,b,c,d) sparc_ld(a,b,c,d)
+#define sparc_ld_imm_ptr(a,b,c,d) sparc_ld_imm(a,b,c,d)
+#endif
+
+/* synonyms for when values are really widened scalar values */
+#define sparc_st_imm_word sparc_st_imm_ptr
+
+#define ARG_BASE sparc_i3 /* pointer to args in i3 */
+#define SAVE_PTR_IN_GENERIC_REGISTER \
+ if (gr < OUT_REGS) { \
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr); \
+ gr++; \
+ } else { \
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0); \
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp, stack_par_pos); \
+ stack_par_pos += SLOT_SIZE; \
+ }
+
+#if SPARCV9
+/* This is a half-hearted attempt at coping with structs passed by value - the
+   actual convention is complicated when floats & doubles are involved, as
+   you end up with fields in different registers on/off the stack.
+   It will take more time to get right... */
+static guint32 *
+v9_struct_arg(guint32 *p, int arg_index, MonoClass *klass, int size, guint *p_gr)
+{
+ MonoMarshalType *info = mono_marshal_load_type_info (klass);
+ int off = 0;
+ int index = 0;
+ guint gr = *p_gr;
+ sparc_ld_imm_ptr (p, ARG_BASE, arg_index*ARG_SIZE, sparc_l0);
+ if (size > 8) {
+ if (info->fields [index].field->type->type == MONO_TYPE_R8) {
+ sparc_lddf_imm (p, sparc_l0, 0, sparc_f0 + 2 * gr);
+ index++;
+ }
+ else {
+ sparc_ldx_imm (p, sparc_l0, 0, sparc_o0 + gr);
+ index++; /* FIXME could be multiple fields in one register */
+ }
+ gr++;
+ size -= 8;
+ off = 8;
+ }
+ if (size > 0) {
+ if (info->fields [index].field->type->type == MONO_TYPE_R8) {
+ sparc_lddf_imm (p, sparc_l0, off, sparc_f0 + 2 * gr);
+ index++;
+ }
+ else {
+ /* will load extra garbage off the end of short structs ... */
+ sparc_ldx_imm (p, sparc_l0, off, sparc_o0 + gr);
+ }
+ gr++;
+ }
+ *p_gr = gr;
+ return p;
+}
+#endif
+
+static inline guint32*
+emit_save_parameters (guint32 *p, MonoMethodSignature *sig, guint stack_size,
+ gboolean use_memcpy)
+{
+ guint i, fr, gr, stack_par_pos, struct_pos, cur_struct_pos;
+ guint32 simpletype;
+
+ fr = gr = 0;
+ stack_par_pos = MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS;
+
+ if (sig->hasthis) {
+ if (use_memcpy) {
+ /* we don't need to save a thing. */
+ } else
+ sparc_mov_reg_reg (p, sparc_i2, sparc_o0);
+ gr ++;
+ }
+
+ if (use_memcpy) {
+ cur_struct_pos = struct_pos = stack_par_pos;
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref)
+ continue;
+ if (sig->params[i]->type == MONO_TYPE_VALUETYPE &&
+ !sig->params[i]->data.klass->enumtype) {
+ gint size;
+ guint32 align;
+
+ size = mono_class_native_size (sig->params[i]->data.klass, &align);
+#if SPARCV9
+ if (size != 4) {
+#else
+ if (1) {
+#endif
+ /* Add alignment */
+ stack_par_pos = (stack_par_pos + (align - 1)) & (~(align - 1));
+ /* need to call memcpy here */
+ sparc_add_imm (p, 0, sparc_sp, stack_par_pos, sparc_o0);
+ sparc_ld_imm_ptr (p, sparc_i3, i*ARG_SIZE, sparc_o1);
+ sparc_set (p, (guint32)size, sparc_o2);
+ sparc_set_ptr (p, (void *)memmove, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+ stack_par_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1));
+ }
+ }
+ }
+ }
+
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->byref) {
+ MonoClass *klass = sig->ret->data.klass;
+ if (!klass->enumtype) {
+ gint size = mono_class_native_size (klass, NULL);
+
+ DEBUG(fprintf(stderr, "retval value type size: %d\n", size));
+#if SPARCV9
+ if (size > 32) {
+#else
+ {
+#endif
+ /* pass on buffer in interp.c to called function */
+ sparc_ld_imm_ptr (p, sparc_i1, 0, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp, 64);
+ }
+ }
+ }
+
+ DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)));
+
+ for (i = 0; i < sig->param_count; i++) {
+ if (sig->params[i]->byref) {
+ SAVE_PTR_IN_GENERIC_REGISTER;
+ continue;
+ }
+ simpletype = sig->params[i]->type;
+ enum_calc_size:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ if (gr < OUT_REGS) {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+
+ case MONO_TYPE_R4:
+#if SPARCV9
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f30); /* fix using this fixed reg */
+ sparc_fdtos(p, sparc_f30, sparc_f0 + 2 * gr + 1);
+ gr++;
+ break;
+#else
+ /* Convert from double to single */
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0);
+ sparc_fdtos (p, sparc_f0, sparc_f0);
+
+ /*
+ * FIXME: Is there an easier way to do an
+ * freg->ireg move ?
+ */
+ sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos);
+
+ if (gr < OUT_REGS) {
+ sparc_ld_imm (p, sparc_sp, stack_par_pos, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ldf_imm (p, sparc_sp, stack_par_pos, sparc_f0);
+ sparc_stf_imm (p, sparc_f0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+#endif
+
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_SZARRAY:
+ SAVE_PTR_IN_GENERIC_REGISTER;
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ guint32 align;
+ MonoClass *klass = sig->params[i]->data.klass;
+ if (klass->enumtype) {
+ simpletype = klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+ size = mono_class_native_size (klass, &align);
+#if SPARCV9
+ if (size <= 16) {
+ if (gr < OUT_REGS) {
+ p = v9_struct_arg(p, i, klass, size, &gr);
+ } else {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ }
+#else
+ /*
+ * FIXME: The 32bit ABI docs do not mention that small
+ * structures are passed in registers.
+ */
+
+ /*
+ if (size == 4) {
+ if (gr < OUT_REGS) {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ld_imm_ptr (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_ld_imm (p, sparc_l0, 0, sparc_l0);
+ sparc_st_imm_word (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ }
+ */
+#endif
+
+ cur_struct_pos = (cur_struct_pos + (align - 1)) & (~(align - 1));
+ if (gr < OUT_REGS) {
+ sparc_add_imm (p, 0, sparc_sp,
+ cur_struct_pos, sparc_o0 + gr);
+ gr ++;
+ } else {
+ sparc_ld_imm_ptr (p, sparc_sp,
+ cur_struct_pos,
+ sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1,
+ sparc_sp,
+ stack_par_pos);
+ }
+ cur_struct_pos += (size + (SLOT_SIZE - 1)) & (~(SLOT_SIZE - 1));
+ break;
+ }
+
+#if SPARCV9
+ case MONO_TYPE_I8:
+ if (gr < OUT_REGS) {
+ sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr++;
+ } else {
+ sparc_ldx_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_stx_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+ case MONO_TYPE_R8:
+ sparc_lddf_imm (p, ARG_BASE, i*ARG_SIZE, sparc_f0 + 2 * i);
+ break;
+#else
+ case MONO_TYPE_I8:
+ case MONO_TYPE_R8:
+ if (gr < (OUT_REGS - 1)) {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr ++;
+
+ sparc_ld_imm (p, ARG_BASE,
+ (i*ARG_SIZE) + 4,
+ sparc_o0 + gr);
+ gr ++;
+ } else if (gr == (OUT_REGS - 1)) {
+ /* Split register/stack */
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_o0 + gr);
+ gr ++;
+
+ sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ } else {
+ sparc_ld_imm (p, ARG_BASE, i*ARG_SIZE, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+
+ sparc_ld_imm (p, ARG_BASE, (i*ARG_SIZE) + 4, sparc_l0);
+ sparc_st_imm (p, sparc_l0, sparc_sp, stack_par_pos);
+ stack_par_pos += SLOT_SIZE;
+ }
+ break;
+#endif
+ default:
+ g_error ("Can't trampoline 0x%x", sig->params[i]->type);
+ }
+ }
+
+ g_assert ((stack_par_pos - BIAS) <= stack_size);
+
+ return p;
+}
+
+static inline guint32 *
+alloc_code_memory (guint code_size)
+{
+ guint32 *p;
+
+ p = g_malloc(code_size);
+
+ return p;
+}
+
+static inline guint32 *
+emit_call_and_store_retval (guint32 *p, MonoMethodSignature *sig,
+ guint stack_size, gboolean string_ctor)
+{
+ guint32 simpletype;
+
+ /* call "callme" */
+ sparc_jmpl_imm (p, sparc_i0, 0, sparc_callsite);
+ sparc_nop (p);
+#if !SPARCV9
+ if (sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) {
+ int size = mono_class_native_size (sig->ret->data.klass, NULL);
+ sparc_unimp (p, size & 4095);
+ }
+#endif
+
+ /* get return value */
+ if (sig->ret->byref || string_ctor) {
+ sparc_st_ptr (p, sparc_o0, sparc_i1, 0);
+ } else {
+ simpletype = sig->ret->type;
+ enum_retval:
+ switch (simpletype) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ sparc_stb (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ sparc_sth (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ sparc_st (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_PTR:
+ sparc_st_ptr (p, sparc_o0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_R4:
+ sparc_stf (p, sparc_f0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_R8:
+ sparc_stdf (p, sparc_f0, sparc_i1, 0);
+ break;
+ case MONO_TYPE_I8:
+#if SPARCV9
+ sparc_stx (p, sparc_o0, sparc_i1, 0);
+#else
+ sparc_std (p, sparc_o0, sparc_i1, 0);
+#endif
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retval;
+ }
+#if SPARCV9
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+ if (size <= 32) {
+ int n_regs = size / 8;
+ int j;
+ sparc_ldx_imm (p, sparc_i1, 0, sparc_i1);
+ /* wrong if there are floating values in the struct... */
+ for (j = 0; j < n_regs; j++) {
+ sparc_stx_imm (p, sparc_o0 + j, sparc_i1, j * 8);
+ }
+ size -= n_regs * 8;
+ if (size > 0) {
+ int last_reg = sparc_o0 + n_regs;
+ /* get value right aligned in register */
+ sparc_srlx_imm(p, last_reg, 64 - 8 * size, last_reg);
+ if ((size & 1) != 0) {
+ sparc_stb_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 1);
+ size--;
+ if (size > 0)
+ sparc_srlx_imm(p, last_reg, 8, last_reg);
+ }
+ if ((size & 2) != 0) {
+ sparc_sth_imm (p, last_reg, sparc_i1, n_regs * 8 + size - 2);
+ size -= 2;
+ if (size > 0)
+ sparc_srlx_imm(p, last_reg, 16, last_reg);
+ }
+ if ((size & 4) != 0)
+ sparc_st_imm (p, last_reg, sparc_i1, n_regs * 8);
+ }
+ }
+#endif
+ break;
+ }
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ g_error ("Can't handle as return value 0x%x", sig->ret->type);
+ }
+ }
+ return p;
+}
+
+MonoPIFunc
+mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ guint32 *p, *code_buffer;
+ guint stack_size, code_size, i;
+ gboolean use_memcpy = FALSE;
+ static GHashTable *cache = NULL;
+ MonoPIFunc res;
+
+ if (!cache)
+ cache = g_hash_table_new ((GHashFunc)mono_signature_hash,
+ (GCompareFunc)mono_metadata_signature_equal);
+
+ if ((res = (MonoPIFunc)g_hash_table_lookup(cache, sig)))
+ return res;
+
+ calculate_sizes (sig, &stack_size, &code_size,
+ string_ctor, &use_memcpy);
+
+ p = code_buffer = alloc_code_memory (code_size);
+ p = emit_prolog (p, sig, stack_size);
+ p = emit_save_parameters (p, sig, stack_size, use_memcpy);
+ p = emit_call_and_store_retval (p, sig, stack_size, string_ctor);
+ /* we don't return structs here so pass in NULL as signature */
+ p = emit_epilog (p, NULL, stack_size);
+
+ g_assert(p <= code_buffer + (code_size / 4));
+
+ DEBUG(sparc_disassemble_code (code_buffer, p, sig_to_name(sig, NULL)));
+
+ /* So here's the deal...
+ * UltraSPARC will flush a whole cache line at a time
+ * BUT, older SPARCs won't.
+ * So, be compatible and flush one dword (8 bytes) at a time...
+ */
+
+ for (i = 0; i < ((p - code_buffer)/2); i++)
+ flushi (code_buffer + (i*2)); /* step 2 guint32s = 1 dword */
+
+ g_hash_table_insert(cache, sig, code_buffer);
+
+ return (MonoPIFunc)code_buffer;
+}
+
+#define MINV_POS (MINIMAL_STACK_SIZE * SLOT_SIZE + BIAS)
+
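+/* Frame layout used below (sketch, offsets relative to %sp):
+ *   MINV_POS              MonoInvocation being filled in
+ *   stackval_arg_pos      stackval array (param_count + 1 entries)
+ *   local_start ...       spilled incoming register args, then the
+ *                         valuetype buffers recorded in vtbuf[]
+ */
+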
+void *
+mono_arch_create_method_pointer (MonoMethod *method)
+{
+ MonoMethodSignature *sig;
+ MonoJitInfo *ji;
+ guint stack_size, code_size, stackval_arg_pos, local_pos;
+ guint i, local_start, reg_param = 0, stack_param, cpos, vt_cur;
+ guint32 align = 0;
+ guint32 *p, *code_buffer;
+ gint *vtbuf;
+ gint32 simpletype;
+
+ code_size = 1024; /* these should be calculated... */
+ stack_size = 1024;
+ stack_param = 0;
+
+ sig = method->signature;
+
+ p = code_buffer = g_malloc (code_size);
+
+ DEBUG(fprintf(stderr, "Delegate [start emiting] %s\n", method->name));
+ DEBUG(fprintf(stderr, "%s\n", sig_to_name(sig, FALSE)));
+
+ p = emit_prolog (p, sig, stack_size);
+
+ /* fill MonoInvocation */
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex)));
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, ex_handler)));
+ sparc_st_imm_ptr (p, sparc_g0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, parent)));
+
+ sparc_set_ptr (p, (void *)method, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, method)));
+
+ stackval_arg_pos = MINV_POS + sizeof (MonoInvocation);
+ local_start = local_pos = stackval_arg_pos + (sig->param_count + 1) * sizeof (stackval);
+
+ if (sig->hasthis) {
+ sparc_st_imm_ptr (p, sparc_i0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, obj)));
+ reg_param = 1;
+ }
+
+ if (sig->param_count) {
+ gint save_count = MIN (OUT_REGS, sig->param_count + sig->hasthis);
+ for (i = reg_param; i < save_count; i++) {
+ sparc_st_imm_ptr (p, sparc_i0 + i, sparc_sp, local_pos);
+ local_pos += SLOT_SIZE;
+ }
+ }
+
+ /* prepare space for valuetypes */
+ vt_cur = local_pos;
+ vtbuf = alloca (sizeof(int)*sig->param_count);
+ cpos = 0;
+ for (i = 0; i < sig->param_count; i++) {
+ MonoType *type = sig->params [i];
+ vtbuf [i] = -1;
+ if (!sig->params[i]->byref && type->type == MONO_TYPE_VALUETYPE) {
+ MonoClass *klass = type->data.klass;
+ gint size;
+
+ if (klass->enumtype)
+ continue;
+ size = mono_class_native_size (klass, &align);
+ cpos += align - 1;
+ cpos &= ~(align - 1);
+ vtbuf [i] = cpos;
+ cpos += size;
+ }
+ }
+ cpos += SLOT_SIZE - 1;
+ cpos &= ~(SLOT_SIZE - 1);
+
+ local_pos += cpos;
+
+ /* set MonoInvocation::stack_args */
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0);
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, stack_args)));
+
+ /* add stackval arguments */
+ for (i=0; i < sig->param_count; i++) {
+ int stack_offset;
+ int type;
+ if (reg_param < OUT_REGS) {
+ stack_offset = local_start + i * SLOT_SIZE;
+ reg_param++;
+ } else {
+ stack_offset = stack_size + 8 + stack_param;
+ stack_param++;
+ }
+
+ if (!sig->params[i]->byref) {
+ type = sig->params[i]->type;
+ enum_arg:
+ switch (type) {
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R8:
+ break;
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ stack_offset += SLOT_SIZE - 4;
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ stack_offset += SLOT_SIZE - 2;
+ break;
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_BOOLEAN:
+ stack_offset += SLOT_SIZE - 1;
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->params[i]->data.klass->enumtype) {
+ type = sig->params[i]->data.klass->enum_basetype->type;
+ goto enum_arg;
+ }
+ g_assert(vtbuf[i] >= 0);
+ break;
+ default:
+ g_error ("can not cope with delegate arg type %d", type);
+ }
+ }
+
+ sparc_add_imm (p, 0, sparc_sp, stack_offset, sparc_o2);
+
+ if (vtbuf[i] >= 0) {
+ sparc_add_imm (p, 0, sparc_sp, vt_cur, sparc_o1);
+ sparc_st_imm_ptr (p, sparc_o1, sparc_sp, stackval_arg_pos);
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos,
+ sparc_o1);
+ sparc_ld_imm_ptr (p, sparc_o2, 0, sparc_o2);
+ vt_cur += vtbuf[i];
+ } else {
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos,
+ sparc_o1);
+ }
+
+ sparc_set_ptr (p, (void *)sig->params[i], sparc_o0);
+ sparc_set (p, (guint32)sig->pinvoke, sparc_o3);
+
+ /* YOU make the CALL! */
+ sparc_set_ptr (p, (void *)stackval_from_data, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+ stackval_arg_pos += sizeof(stackval);
+ }
+
+ /* return value storage */
+ /* Align to dword */
+ stackval_arg_pos = (stackval_arg_pos + (8 - 1)) & (~(8 -1));
+ if (sig->param_count) {
+ sparc_add_imm (p, 0, sparc_sp, stackval_arg_pos, sparc_l0);
+ }
+ if (!sig->ret->byref && sig->ret->type == MONO_TYPE_VALUETYPE && !sig->ret->data.klass->enumtype) {
+#if !SPARCV9
+ /* pass on callers buffer */
+ sparc_ld_imm_ptr (p, sparc_fp, 64, sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0);
+#else
+ sparc_add_imm (p, 0, sparc_l0, sizeof(stackval), sparc_l1);
+ sparc_st_imm_ptr (p, sparc_l1, sparc_l0, 0);
+#endif
+ }
+
+ sparc_st_imm_ptr (p, sparc_l0, sparc_sp,
+ (MINV_POS + G_STRUCT_OFFSET (MonoInvocation, retval)));
+
+ /* call ves_exec_method */
+ sparc_add_imm (p, 0, sparc_sp, MINV_POS, sparc_o0);
+ sparc_set_ptr (p, (void *)ves_exec_method, sparc_l0);
+ sparc_jmpl_imm (p, sparc_l0, 0, sparc_callsite);
+ sparc_nop (p);
+
+ /* move retval from stackval to proper place (r3/r4/...) */
+ if (sig->ret->byref) {
+ sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0 );
+ } else {
+ enum_retvalue:
+ switch (sig->ret->type) {
+ case MONO_TYPE_VOID:
+ break;
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ break;
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ case MONO_TYPE_CLASS:
+ sparc_ld_imm_ptr (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+#if SPARCV9
+ sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+#else
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos, sparc_i0);
+ sparc_ld_imm (p, sparc_sp, stackval_arg_pos + 4, sparc_i1);
+#endif
+ break;
+ case MONO_TYPE_R4:
+ sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0);
+ sparc_fdtos(p, sparc_f0, sparc_f0);
+ break;
+ case MONO_TYPE_R8:
+ sparc_lddf_imm (p, sparc_sp, stackval_arg_pos, sparc_f0);
+ break;
+ case MONO_TYPE_VALUETYPE: {
+ gint size;
+ gint reg = sparc_i0;
+ if (sig->ret->data.klass->enumtype) {
+ simpletype = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+#if SPARCV9
+ size = mono_class_native_size (sig->ret->data.klass, NULL);
+ sparc_ldx_imm (p, sparc_sp, stackval_arg_pos, sparc_l0);
+ if (size <= 16) {
+ gint off = 0;
+ if (size >= 8) {
+ sparc_ldx_imm (p, sparc_l0, 0, reg);
+ size -= 8;
+ off += 8;
+ reg++;
+ }
+ if (size > 0)
+ sparc_ldx_imm (p, sparc_l0, off, reg);
+ } else
+ NOT_IMPL("value type as ret val from delegate");
+#endif
+ break;
+ }
+ default:
+ g_error ("Type 0x%x not handled yet in thunk creation",
+ sig->ret->type);
+ break;
+ }
+ }
+
+ p = emit_epilog (p, sig, stack_size);
+
+ for (i = 0; i < ((p - code_buffer)/2); i++)
+ flushi (code_buffer + (i*2)); /* step 2 guint32s = 1 dword */
+
+ ji = g_new0 (MonoJitInfo, 1);
+ ji->method = method;
+ ji->code_size = p - code_buffer;
+ ji->code_start = code_buffer;
+
+ mono_jit_info_table_add (mono_get_root_domain (), ji);
+
+ DEBUG(sparc_disassemble_code (code_buffer, p, method->name));
+
+ DEBUG(fprintf(stderr, "Delegate [end emiting] %s\n", method->name));
+
+ return ji->code_start;
+}
diff --git a/src/arch/x64/.gitignore b/src/arch/x64/.gitignore
new file mode 100644
index 0000000..6930f61
--- /dev/null
+++ b/src/arch/x64/.gitignore
@@ -0,0 +1,4 @@
+/Makefile.in
+/Makefile
+/.deps
+/.libs
diff --git a/src/arch/x64/Makefile.am b/src/arch/x64/Makefile.am
new file mode 100644
index 0000000..db9d583
--- /dev/null
+++ b/src/arch/x64/Makefile.am
@@ -0,0 +1,2 @@
+EXTRA_DIST = x64-codegen.h
+
diff --git a/src/arch/x64/x64-codegen.h b/src/arch/x64/x64-codegen.h
new file mode 100644
index 0000000..02b9907
--- /dev/null
+++ b/src/arch/x64/x64-codegen.h
@@ -0,0 +1,1938 @@
+/*
+ * x64-codegen.h: Macros for generating x86-64 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ * Zalman Stern
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef X64_H
+#define X64_H
+
+#include "../x86/x86-codegen.h"
+
+#include <stdint.h>
+
+/* x86-64 general purpose registers */
+typedef enum {
+ X64_RAX = 0,
+ X64_RCX = 1,
+ X64_RDX = 2,
+ X64_RBX = 3,
+ X64_RSP = 4,
+ X64_RBP = 5,
+ X64_RSI = 6,
+ X64_RDI = 7,
+ X64_R8 = 8,
+ X64_R9 = 9,
+ X64_R10 = 10,
+ X64_R11 = 11,
+ X64_R12 = 12,
+ X64_R13 = 13,
+ X64_R14 = 14,
+ X64_R15 = 15,
+ X64_RIP = 16,
+ X64_NREG
+} X64_Reg_No;
+
+/* x86-64 XMM registers */
+typedef enum {
+ X64_XMM0 = 0,
+ X64_XMM1 = 1,
+ X64_XMM2 = 2,
+ X64_XMM3 = 3,
+ X64_XMM4 = 4,
+ X64_XMM5 = 5,
+ X64_XMM6 = 6,
+ X64_XMM7 = 7,
+ X64_XMM8 = 8,
+ X64_XMM9 = 9,
+ X64_XMM10 = 10,
+ X64_XMM11 = 11,
+ X64_XMM12 = 12,
+ X64_XMM13 = 13,
+ X64_XMM14 = 14,
+ X64_XMM15 = 15,
+ X64_XMM_NREG = 16,
+} X64_XMM_Reg_No;
+
+typedef enum
+{
+ X64_REX_B = 1, /* The register in r/m field, base register in SIB byte, or reg in opcode is 8-15 rather than 0-7 */
+ X64_REX_X = 2, /* The index register in SIB byte is 8-15 rather than 0-7 */
+ X64_REX_R = 4, /* The reg field of ModRM byte is 8-15 rather than 0-7 */
+ X64_REX_W = 8 /* Operation is 64 bits instead of 32 (default) or 16 (with 0x66 prefix) */
+} X64_REX_Bits;
+
+#if defined(__native_client_codegen__)
+
+#define x64_codegen_pre(inst) uint8_t* _codegen_start = (inst); x64_nacl_instruction_pre();
+#define x64_codegen_post(inst) (x64_nacl_instruction_post(&_codegen_start, &(inst)), _codegen_start);
+
+/* Because of rex prefixes, etc, call sequences are not constant size. */
+/* These pre- and post-sequence hooks remedy this by aligning the call */
+/* sequence after we emit it, since we will know the exact size then. */
+#define x64_call_sequence_pre(inst) uint8_t* _code_start = (inst);
+#define x64_call_sequence_post(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+
+/* Native client can load/store using one of the following registers */
+/* as a base: rip, r15, rbp, rsp. Any other base register needs to have */
+/* its upper 32 bits cleared and reference memory using r15 as the base. */
+#define x64_is_valid_nacl_base(reg) \
+ ((reg) == X64_RIP || (reg) == X64_R15 || \
+ (reg) == X64_RBP || (reg) == X64_RSP)
+#else
+
+#define x64_codegen_pre(inst)
+#define x64_codegen_post(inst)
+
+#endif /* __native_client_codegen__ */
+
+#ifdef TARGET_WIN32
+#define X64_ARG_REG1 X64_RCX
+#define X64_ARG_REG2 X64_RDX
+#define X64_ARG_REG3 X64_R8
+#define X64_ARG_REG4 X64_R9
+#else
+#define X64_ARG_REG1 X64_RDI
+#define X64_ARG_REG2 X64_RSI
+#define X64_ARG_REG3 X64_RDX
+#define X64_ARG_REG4 X64_RCX
+#endif
+
+#ifdef TARGET_WIN32
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_R15) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#elif defined(__native_client_codegen__)
+/* x64 Native Client code may not write R15 */
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_RSI) | (1 << X64_RDI) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#else
+#define X64_CALLEE_REGS ((1 << X64_RAX) | (1 << X64_RCX) | (1 << X64_RDX) | (1 << X64_RSI) | (1 << X64_RDI) | (1 << X64_R8) | (1 << X64_R9) | (1 << X64_R10))
+#define X64_IS_CALLEE_REG(reg) (X64_CALLEE_REGS & (1 << (reg)))
+
+#define X64_ARGUMENT_REGS ((1 << X64_RDI) | (1 << X64_RSI) | (1 << X64_RDX) | (1 << X64_RCX) | (1 << X64_R8) | (1 << X64_R9))
+#define X64_IS_ARGUMENT_REG(reg) (X64_ARGUMENT_REGS & (1 << (reg)))
+
+#define X64_CALLEE_SAVED_REGS ((1 << X64_RBX) | (1 << X64_R12) | (1 << X64_R13) | (1 << X64_R14) | (1 << X64_R15) | (1 << X64_RBP))
+#define X64_IS_CALLEE_SAVED_REG(reg) (X64_CALLEE_SAVED_REGS & (1 << (reg)))
+#endif
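+
+/* Example check (illustrative, not from the original header):
+ * X64_IS_CALLEE_SAVED_REG (X64_RBX) is nonzero under all three ABIs
+ * above, while X64_IS_CALLEE_SAVED_REG (X64_RAX) never is -- rax is the
+ * scratch/return register everywhere.
+ */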
+
+#define X64_REX(bits) ((unsigned char)(0x40 | (bits)))
+
+#if defined(__native_client_codegen__)
+#define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _x64_rex_bits = \
+ (((width) > 4) ? X64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? X64_REX_R : 0) | \
+ (((reg_index) > 7) ? X64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \
+ x64_nacl_tag_rex((inst)); \
+ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \
+ } while (0)
+#else
+#define x64_emit_rex(inst, width, reg_modrm, reg_index, reg_rm_base_opcode) do \
+ { \
+ unsigned char _x64_rex_bits = \
+ (((width) > 4) ? X64_REX_W : 0) | \
+ (((reg_modrm) > 7) ? X64_REX_R : 0) | \
+ (((reg_index) > 7) ? X64_REX_X : 0) | \
+ (((reg_rm_base_opcode) > 7) ? X64_REX_B : 0); \
+ if ((_x64_rex_bits != 0) || (((width) == 1))) *(inst)++ = X64_REX(_x64_rex_bits); \
+ } while (0)
+#endif /* __native_client_codegen__ */
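+
+/* A worked example of the REX computation (not from the original header):
+ * x64_mov_reg_reg (code, X64_R8, X64_RAX, 8) below calls
+ * x64_emit_rex (code, 8, X64_R8, 0, X64_RAX); width 8 > 4 sets X64_REX_W
+ * and reg_modrm 8 > 7 sets X64_REX_R, so the prefix byte is
+ * 0x40 | 0x8 | 0x4 = 0x4c, followed by the 0x8b opcode and a ModRM byte
+ * with r8 in the reg field and rax in r/m: 4c 8b c0 = mov %rax,%r8.
+ */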
+
+typedef union {
+ uint64_t val;
+ unsigned char b[8];
+} x64_imm_buf;
+
+/* In 64 bit mode, all registers have a low byte subregister */
+#undef X86_IS_BYTE_REG
+#define X86_IS_BYTE_REG(reg) 1
+
+#define x64_modrm_mod(modrm) ((modrm) >> 6)
+#define x64_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define x64_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define x64_rex_r(rex) ((((rex) >> 2) & 0x1) << 3)
+#define x64_rex_x(rex) ((((rex) >> 1) & 0x1) << 3)
+#define x64_rex_b(rex) ((((rex) >> 0) & 0x1) << 3)
+
+#define x64_sib_scale(sib) ((sib) >> 6)
+#define x64_sib_index(sib) (((sib) >> 3) & 0x7)
+#define x64_sib_base(sib) ((sib) & 0x7)
+
+#define x64_is_imm32(val) ((int64_t)(val) >= -((int64_t)1<<31) && (int64_t)(val) <= (((int64_t)1<<31)-1))
+
+#define x86_imm_emit64(inst,imm) \
+ do { \
+ x64_imm_buf imb; \
+ imb.val = (uint64_t) (imm); \
+ *(inst)++ = imb.b [0]; \
+ *(inst)++ = imb.b [1]; \
+ *(inst)++ = imb.b [2]; \
+ *(inst)++ = imb.b [3]; \
+ *(inst)++ = imb.b [4]; \
+ *(inst)++ = imb.b [5]; \
+ *(inst)++ = imb.b [6]; \
+ *(inst)++ = imb.b [7]; \
+ } while (0)
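+
+/* The union trick above emits the immediate in host byte order, which on
+ * the little-endian x86 hosts this header targets is exactly the byte
+ * order the instruction stream requires.
+ */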
+
+#define x64_membase_emit(inst,reg,basereg,disp) do { \
+ if ((basereg) == X64_RIP) { \
+ x86_address_byte ((inst), 0, (reg)&0x7, 5); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ else \
+ x86_membase_emit ((inst),(reg)&0x7, (basereg)&0x7, (disp)); \
+} while (0)
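+
+/* The X64_RIP case above uses the 64-bit ModRM encoding in which
+ * mod = 00, r/m = 101 means RIP-relative with a 32-bit displacement.
+ * For instance (illustrative): x64_membase_emit (code, X64_RAX, X64_RIP, 16)
+ * emits ModRM byte 0x05 followed by the little-endian dword 16, i.e. an
+ * operand of [rip+16].
+ */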
+
+#define x64_memindex_emit(inst, reg, basereg, disp, indexreg, shift) \
+ x86_memindex_emit((inst), ((reg) & 0x7), ((basereg) & 0x7), (disp), ((indexreg) & 0x7), (shift))
+
+#define x64_alu_reg_imm_size_body(inst,opc,reg,imm,size) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((reg) == X64_RAX) { \
+ x64_emit_rex(inst, size, 0, 0, 0); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } else { \
+ x64_emit_rex(inst, size, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x64_alu_reg_reg_size_body(inst,opc,dreg,reg,size) \
+ do { \
+ x64_emit_rex(inst, size, (dreg), 0, (reg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* NaCl modules may not directly update RSP or RBP other than direct copies */
+/* between them. Instead the lower 4 bytes are updated and then added to R15 */
+#define x64_is_nacl_stack_reg(reg) (((reg) == X64_RSP) || ((reg) == X64_RBP))
+
+#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ do{ \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg(reg)) { \
+ if (((opc) != X86_ADD) && ((opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((dreg)) && ((reg) != X64_R15)) { \
+ if (((opc) != X86_ADD && (opc) != X86_SUB)) \
+ g_assert_not_reached(); \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), 4); \
+ /* Use LEA instead of ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (dreg), (dreg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) \
+ x64_alu_reg_imm_size_body((inst), (opc), (reg), (imm), (size))
+
+#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) \
+ x64_alu_reg_reg_size_body((inst), (opc), (dreg), (reg), (size))
+
+#endif /*__native_client_codegen__*/
+
+#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size((inst),(opc),(reg),(imm),8)
+
+#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size ((inst),(opc),(dreg),(reg),8)
+
+#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),(reg),0,(basereg)); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x64_membase_emit (inst, reg, basereg, disp); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define x64_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (regp)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (dreg), 0, (reg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_mem_body(inst,reg,mem,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, 0); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_address_byte ((inst), 0, (reg), 4); \
+ x86_address_byte ((inst), 0, 4, 5); \
+ x86_imm_emit32 ((inst), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* We have to re-base memory reads because memory isn't zero based. */
+#define x64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x64_mov_reg_membase((inst),(reg),X64_R15,(mem),(size)); \
+ } while (0)
+#else
+#define x64_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x64_mov_reg_mem_body((inst),(reg),(mem),(size)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x64_mov_reg_membase_body(inst,reg,basereg,disp,size) \
+ do { \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x64_mov_reg_memindex_size_body(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); \
+ x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+
+#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), 4); \
+ /* Add %r15 using LEA to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_mov_reg_memindex_size_body((inst), (reg), (basereg), (disp), (indexreg), (shift), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ /* Clear upper 32 bits with mov of size 4 */ \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), 4); \
+ /* Add %r15 */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) \
+ x64_mov_reg_memindex_size_body((inst),(reg),(basereg),(disp),(indexreg),(shift),(size))
+#define x64_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_mov_reg_membase_body((inst), (reg), (basereg), (disp), (size)); \
+ } while (0)
+
+#endif /*__native_client_codegen__*/
+
+#define x64_movzx_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, (size), (reg), 0, (basereg)); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb6; break; \
+ case 2: *(inst)++ = (unsigned char)0x0f; *(inst)++ = (unsigned char)0xb7; break; \
+ case 4: case 8: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_mem(inst,reg,mem) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,0); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_mem_emit ((inst), ((reg)&0x7), (mem)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_membase_emit ((inst), ((reg)&0x7), ((basereg)&0x7), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_memindex(inst, reg, basereg, disp, indexreg, shift) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(reg),0,(basereg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsxd_reg_reg(inst,dreg,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst,8,(dreg),0,(reg)); \
+ *(inst)++ = (unsigned char)0x63; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+/* mov is pretty much the only instruction that supports a 64-bit immediate. Optimize for the
+ * common case of a 32-bit immediate. Pepper with casts to avoid warnings.
+ */
+#define x64_mov_reg_imm_size(inst,reg,imm,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, (size), 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xb8 + ((reg) & 0x7); \
+ if ((size) == 8) \
+ x86_imm_emit64 ((inst), (uint64_t)(imm)); \
+ else \
+ x86_imm_emit32 ((inst), (int)(uint64_t)(imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_mov_reg_imm(inst,reg,imm) \
+ do { \
+ int _x64_width_temp = ((uint64_t)(imm) == (uint64_t)(int)(uint64_t)(imm)); \
+ x64_codegen_pre(inst); \
+ x64_mov_reg_imm_size ((inst), (reg), (imm), (_x64_width_temp ? 4 : 8)); \
+ x64_codegen_post(inst); \
+ } while (0)
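+
+/* Editor's sketch (not part of the original API): exercising the width test above.
+ * An immediate that survives sign-extended 32-bit truncation takes the 5-byte
+ * b8+r/imm32 form; anything wider takes the 10-byte REX.W b8+r/imm64 form.
+ * Register names are assumed from the enum earlier in this header. */
+static inline unsigned char*
+x64_example_mov_imm (unsigned char *code)
+{
+	x64_mov_reg_imm (code, X64_RAX, 0x12345678);			/* b8 78 56 34 12 */
+	x64_mov_reg_imm (code, X64_RAX, 0x1122334455667788ULL);	/* 48 b8 88 77 66 55 44 33 22 11 */
+	return code;
+}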
+
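+/* The templates emit a mov with a zero 64-bit immediate; the 8 placeholder bytes are meant to be patched with the real value later. */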
+#define x64_set_reg_template(inst,reg) x64_mov_reg_imm_size ((inst),(reg), 0, 8)
+
+#define x64_set_template(inst,reg) x64_set_reg_template((inst),(reg))
+
+#define x64_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((size) == 2) \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ x64_emit_rex(inst, (size) == 1 ? 0 : (size), 0, 0, (basereg)); \
+ if ((size) == 1) { \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg) & 0x7, (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+
+#define x64_lea_membase_body(inst,reg,basereg,disp) \
+ do { \
+ x64_emit_rex(inst, 8, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+/* NaCl modules may not write directly into RSP/RBP. Instead, use a */
+/* 32-bit LEA and add R15 to the effective address */
+#define x64_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg(reg)) { \
+ /* 32-bit LEA */ \
+ x64_emit_rex((inst), 4, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x64_membase_emit((inst), (reg), (basereg), (disp)); \
+ /* Use a 64-bit LEA instead of an ADD to preserve flags */ \
+ x64_lea_memindex_size((inst), (reg), (reg), 0, X64_R15, 0, 8); \
+ } else { \
+ x64_lea_membase_body((inst), (reg), (basereg), (disp)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+#else
+#define x64_lea_membase(inst,reg,basereg,disp) \
+ x64_lea_membase_body((inst), (reg), (basereg), (disp))
+#endif /*__native_client_codegen__*/
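+
+/* Editor's sketch: in the plain (non-NaCl) build the macro above is a single
+ * 64-bit LEA. X64_RDI is assumed from the register enum earlier in this header. */
+static inline unsigned char*
+x64_example_lea (unsigned char *code)
+{
+	x64_lea_membase (code, X64_RDI, X64_RSP, 16);	/* 48 8d 7c 24 10: lea rdi, [rsp+16] */
+	return code;
+}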
+
+/* The instruction is implicitly 64-bit, so don't generate a REX prefix just for the size. */
+#define x64_push_reg(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x50 + ((reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
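+
+/* Editor's sketch: pushing one of R8-R15 still needs REX.B to select the high
+ * register, while the low eight registers encode in a single byte. */
+static inline unsigned char*
+x64_example_push (unsigned char *code)
+{
+	x64_push_reg (code, X64_RBX);	/* 53 */
+	x64_push_reg (code, X64_R12);	/* 41 54 */
+	return code;
+}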
+
+/* The instruction is implicitly 64-bit, so don't generate a REX prefix just for the size. */
+#define x64_push_membase(inst,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (basereg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_pop_reg_body(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0x58 + ((reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ x64_emit_rex ((inst),0,0,0,(reg)); \
+ x86_jump_reg((inst),((reg)&0x7)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_mem_size(inst,mem,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+		x64_mov_reg_mem((inst), X64_R11, (mem), 4); \
+ x64_jump_reg_size((inst), X64_R11, 4); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+#define x64_call_reg_internal(inst,reg) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_alu_reg_imm_size((inst), X86_AND, (reg), (nacl_align_byte), 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ x64_emit_rex((inst), 0, 0, 0, (reg)); \
+ x86_call_reg((inst), ((reg) & 0x7)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+#define x64_call_reg(inst,reg) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_call_reg_internal((inst), (reg)); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+
+#define x64_ret(inst) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_jump_reg_size((inst), X64_R11, 8); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_leave(inst) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_mov_reg_reg((inst), X64_RSP, X64_RBP, 8); \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), X64_RBP, X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, X64_RBP, X64_R15, 8); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_pop_reg(inst,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ if (x64_is_nacl_stack_reg((reg))) { \
+ x64_pop_reg_body((inst), X64_R11); \
+ x64_mov_reg_reg_size((inst), (reg), X64_R11, 4); \
+ x64_alu_reg_reg_size((inst), X86_ADD, (reg), X64_R15, 8); \
+ } else { \
+ x64_pop_reg_body((inst), (reg)); \
+ } \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#else
+
+#define x64_call_reg(inst,reg) \
+ do { \
+ x64_emit_rex(inst, 0, 0, 0, (reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, ((reg) & 0x7)); \
+ } while (0)
+
+
+#define x64_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+#define x64_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+
+#define x64_pop_reg(inst,reg) x64_pop_reg_body((inst), (reg))
+
+#endif /*__native_client_codegen__*/
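+
+/* Editor's sketch: a conventional frame teardown built from the two macros
+ * above; under __native_client_codegen__ the same calls expand to the
+ * sandboxed pop/mask/jump sequences instead. */
+static inline unsigned char*
+x64_example_epilogue (unsigned char *code)
+{
+	x64_leave (code);	/* c9 in the plain build */
+	x64_ret (code);		/* c3 in the plain build */
+	return code;
+}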
+
+#define x64_movsd_reg_regp(inst,reg,regp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_regp_reg(inst,regp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_reg_regp(inst,reg,regp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_regp_reg(inst,regp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (regp)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_regp_emit ((inst), (reg) & 0x7, (regp) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movsd_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf2); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_movss_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), 0xf3); \
+ x64_emit_rex(inst, 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg) & 0x7, (basereg) & 0x7, (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+/* The original one-byte inc_reg opcodes (0x40+reg) are repurposed as REX prefixes in 64-bit mode, so inc/dec use the 0xff /0 and /1 encodings instead */
+#define x64_inc_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),0,(reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x64_dec_reg_size(inst,reg,size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),(size),0,0,(reg)); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst),1,(reg) & 0x7); \
+ x64_codegen_post(inst); \
+ } while (0)
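+
+/* Editor's sketch: with 0x40-0x4f gone, inc/dec go through ff /0 and ff /1. */
+static inline unsigned char*
+x64_example_inc_dec (unsigned char *code)
+{
+	x64_inc_reg_size (code, X64_RAX, 8);	/* 48 ff c0: inc rax */
+	x64_dec_reg_size (code, X64_RCX, 4);	/* ff c9: dec ecx */
+	return code;
+}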
+
+#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst),0,0,0,(basereg)); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x64_membase_emit ((inst), 0, (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#if defined(__native_client_codegen__)
+
+/* The 3-7 byte NOP sequences in x64_padding_size below are all illegal in */
+/* 64-bit Native Client because they load into rSP/rBP or use duplicate */
+/* prefixes. Instead we use the NOPs recommended in Section 3.5.1.8 of the */
+/* Intel64 and IA-32 Architectures Optimization Reference Manual and */
+/* Section 4.13 of AMD Software Optimization Guide for Family 10h Processors. */
+
+#define x64_padding_size(inst,size) \
+ do { \
+ unsigned char *code_start = (inst); \
+ switch ((size)) { \
+ /* xchg %eax,%eax, recognized by hardware as a NOP */ \
+ case 1: *(inst)++ = 0x90; break; \
+ /* xchg %ax,%ax */ \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; \
+ break; \
+ /* nop (%rax) */ \
+ case 3: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ *(inst)++ = 0x00; \
+ break; \
+ /* nop 0x0(%rax) */ \
+ case 4: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) */ \
+ case 5: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nopw 0x0(%rax,%rax) */ \
+ case 6: *(inst)++ = 0x66; *(inst)++ = 0x0f; \
+ *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 1, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit8 ((inst), 0); \
+ break; \
+ /* nop 0x0(%rax) (32-bit displacement) */ \
+ case 7: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, X64_RAX); \
+ x86_imm_emit32((inst), 0); \
+ break; \
+ /* nop 0x0(%rax,%rax) (32-bit displacement) */ \
+ case 8: *(inst)++ = 0x0f; *(inst)++ = 0x1f; \
+ x86_address_byte ((inst), 2, 0, 4); \
+ x86_address_byte ((inst), 0, X64_RAX, X64_RAX); \
+ x86_imm_emit32 ((inst), 0); \
+ break; \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ g_assert(code_start + (size) == (unsigned char *)(inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client calls; we restrict jumping to 32 bits */
+#define x64_call_membase_size(inst,basereg,disp,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre(inst); \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_call_reg_internal((inst), X64_R11); \
+ x64_call_sequence_post(inst); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* Size is ignored for Native Client jumps; we restrict jumping to 32 bits */
+#define x64_jump_membase_size(inst,basereg,disp,size) \
+ do { \
+ x64_mov_reg_membase((inst), X64_R11, (basereg), (disp), 4); \
+ x64_jump_reg_size((inst), X64_R11, 4); \
+ } while (0)
+
+/* On Native Client we can't jump more than INT_MAX in either direction */
+#define x64_jump_code_size(inst,target,size) \
+ do { \
+		/* x86_jump_code is emitted twice in case the code is */ \
+		/* relocated by x64_codegen_post */ \
+ uint8_t* jump_start; \
+ x64_codegen_pre(inst); \
+ assert(x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))); \
+ x86_jump_code((inst),(target)); \
+ inst = x64_codegen_post(inst); \
+ jump_start = (inst); \
+ x86_jump_code((inst),(target)); \
+ mono_x64_patch(jump_start, (target)); \
+} while (0)
+
+#else
+
+/* From the AMD64 Software Optimization Manual */
+#define x64_padding_size(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: *(inst)++ = 0x90; break; \
+ case 2: *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ case 3: *(inst)++ = 0x66; *(inst)++ = 0x66; *(inst)++ = 0x90; break; \
+ default: x64_emit_rex ((inst),8,0,0,0); x86_padding ((inst), (size) - 1); \
+		} \
+ } while (0)
+
+#define x64_call_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst),2, (basereg),(disp)); } while (0)
+#define x64_jump_membase_size(inst,basereg,disp,size) do { x64_emit_rex ((inst),0,0,0,(basereg)); *(inst)++ = (unsigned char)0xff; x64_membase_emit ((inst), 4, (basereg), (disp)); } while (0)
+
+#define x64_jump_code_size(inst,target,size) do { \
+ if (x64_is_imm32 ((int64_t)(target) - (int64_t)(inst))) { \
+ x86_jump_code((inst),(target)); \
+ } else { \
+ x64_jump_membase ((inst), X64_RIP, 0); \
+ *(uint64_t*)(inst) = (uint64_t)(target); \
+ (inst) += 8; \
+ } \
+} while (0)
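+
+/* Editor's sketch: the reachability test above is the usual "fits in a
+ * sign-extended rel32" check; targets farther than +/-2 GiB take the
+ * indirect jump through an inline 64-bit target. */
+static inline int
+x64_example_target_is_near (unsigned char *code, void *target)
+{
+	return x64_is_imm32 ((int64_t)(target) - (int64_t)(code));
+}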
+
+#endif /*__native_client_codegen__*/
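+
+/* Editor's sketch: padding the emission point up to a 16-byte boundary with
+ * the NOPs above, at most eight bytes per x64_padding_size call. */
+static inline unsigned char*
+x64_example_align16 (unsigned char *code)
+{
+	while (((uintptr_t)code) & 15) {
+		uintptr_t gap = 16 - (((uintptr_t)code) & 15);
+		x64_padding_size (code, gap > 8 ? 8 : (int)gap);
+	}
+	return code;
+}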
+
+/*
+ * SSE
+ */
+
+// TODO: Reorganize the SSE opcode defines.
+
+/* Two opcode SSE defines */
+#define emit_sse_reg_reg_op2(inst, dreg, reg, op1, op2) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), (op1), (op2), 0)
+
+#define emit_sse_reg_reg_op2_size(inst, dreg, reg, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+		x64_emit_rex ((inst), (size), (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_reg_reg_op2_imm(inst, dreg, reg, op1, op2, imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ emit_sse_reg_reg_op2 ((inst), (dreg), (reg), (op1), (op2)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_membase_reg_op2(inst, basereg, disp, reg, op1, op2) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), (op1), (op2), 0)
+
+#define emit_sse_membase_reg_op2_size(inst, basereg, disp, reg, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), (size), (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_memindex_reg_op2(inst, basereg, disp, indexreg, shift, reg, op1, op2) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex (inst, 0, (reg), (indexreg), (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_memindex_emit((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while(0)
+
+#define emit_sse_reg_membase_op2(inst, dreg, basereg, disp, op1, op2) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), (op1), (op2), 0)
+
+#define emit_sse_reg_membase_op2_size(inst, dreg, basereg, disp, op1, op2, size) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex ((inst), (size), (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define emit_sse_reg_memindex_op2(inst, dreg, basereg, disp, indexreg, shift, op1, op2) \
+ do { \
+ x64_codegen_pre(inst); \
+ x64_emit_rex (inst, 0, (dreg), (indexreg), (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ x64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+ x64_codegen_post(inst); \
+ } while(0)
+
+/* Three opcode SSE defines */
+#define emit_opcode3(inst,op1,op2,op3) do { \
+ *(inst)++ = (unsigned char)(op1); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+} while (0)
+
+#define emit_sse_reg_reg_size(inst,dreg,reg,op1,op2,op3,size) do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)(op1); \
+	x64_emit_rex ((inst), (size), (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg(inst,dreg,reg,op1,op2,op3) emit_sse_reg_reg_size ((inst), (dreg), (reg), (op1), (op2), (op3), 0)
+
+#define emit_sse_reg_reg_imm(inst,dreg,reg,op1,op2,op3,imm) do { \
+ x64_codegen_pre(inst); \
+ emit_sse_reg_reg ((inst), (dreg), (reg), (op1), (op2), (op3)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_membase_reg(inst,basereg,disp,reg,op1,op2,op3) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ x64_emit_rex ((inst), 0, (reg), 0, (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x64_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_membase(inst,dreg,basereg,disp,op1,op2,op3) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+ x64_emit_rex ((inst), 0, (dreg), 0, (basereg) == X64_RIP ? 0 : (basereg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ x64_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+/* Four opcode SSE defines */
+
+#define emit_sse_reg_reg_op4_size(inst,dreg,reg,op1,op2,op3,op4,size) do { \
+ x64_codegen_pre(inst); \
+ x86_prefix((inst), (unsigned char)(op1)); \
+	x64_emit_rex ((inst), (size), (dreg), 0, (reg)); \
+ *(inst)++ = (unsigned char)(op2); \
+ *(inst)++ = (unsigned char)(op3); \
+ *(inst)++ = (unsigned char)(op4); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x64_codegen_post(inst); \
+} while (0)
+
+#define emit_sse_reg_reg_op4(inst,dreg,reg,op1,op2,op3,op4) emit_sse_reg_reg_op4_size ((inst), (dreg), (reg), (op1), (op2), (op3), (op4), 0)
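+
+/* Editor's note on the families above: op1 is the mandatory SSE prefix
+ * (0x66/0xf2/0xf3) and must precede the REX byte, which is why it is written
+ * out before x64_emit_rex. A sketch, using xmm register numbers directly:
+ *
+ *   emit_sse_reg_reg (code, 1, 2, 0xf2, 0x0f, 0x58);   f2 0f 58 ca = addsd xmm1, xmm2
+ */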
+
+/* specific SSE opcode defines */
+
+#define x64_sse_xorpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg), 0x66, 0x0f, 0x57)
+
+#define x64_sse_xorpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x57)
+
+#define x64_sse_andpd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst),(dreg),(basereg), (disp), 0x66, 0x0f, 0x54)
+
+#define x64_sse_movsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x10)
+
+#define x64_sse_movsd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf2, 0x0f, 0x10)
+
+#define x64_sse_movsd_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf2, 0x0f, 0x11)
+
+#define x64_sse_movss_membase_reg(inst,basereg,disp,reg) emit_sse_membase_reg ((inst), (basereg), (disp), (reg), 0xf3, 0x0f, 0x11)
+
+#define x64_sse_movss_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0xf3, 0x0f, 0x10)
+
+#define x64_sse_comisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2f)
+
+#define x64_sse_comisd_reg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase ((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x2f)
+
+#define x64_sse_ucomisd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst),(dreg),(reg),0x66,0x0f,0x2e)
+
+#define x64_sse_cvtsd2si_reg_reg(inst,dreg,reg) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2d, 8)
+
+#define x64_sse_cvttsd2si_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2c, (size))
+
+#define x64_sse_cvttsd2si_reg_reg(inst,dreg,reg) x64_sse_cvttsd2si_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define x64_sse_cvtsi2sd_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf2, 0x0f, 0x2a, (size))
+
+#define x64_sse_cvtsi2sd_reg_reg(inst,dreg,reg) x64_sse_cvtsi2sd_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define x64_sse_cvtsi2ss_reg_reg_size(inst,dreg,reg,size) emit_sse_reg_reg_size ((inst), (dreg), (reg), 0xf3, 0x0f, 0x2a, (size))
+
+#define x64_sse_cvtsi2ss_reg_reg(inst,dreg,reg) x64_sse_cvtsi2ss_reg_reg_size ((inst), (dreg), (reg), 8)
+
+#define x64_sse_cvtsd2ss_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5a)
+
+#define x64_sse_cvtss2sd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf3, 0x0f, 0x5a)
+
+#define x64_sse_addsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x58)
+
+#define x64_sse_subsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5c)
+
+#define x64_sse_mulsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x59)
+
+#define x64_sse_divsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg ((inst), (dreg), (reg), 0xf2, 0x0f, 0x5e)
+
+#define x64_sse_sqrtsd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x51)
+
+
+#define x64_sse_pinsrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc4, (imm))
+
+#define x64_sse_pextrw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm ((inst), (dreg), (reg), 0x66, 0x0f, 0xc5, (imm))
+
+
+#define x64_sse_cvttsd2si_reg_xreg_size(inst,reg,xreg,size) emit_sse_reg_reg_size ((inst), (reg), (xreg), 0xf2, 0x0f, 0x2c, (size))
+
+
+#define x64_sse_addps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x58)
+
+#define x64_sse_addps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x58, size)
+
+#define x64_sse_divps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5e)
+
+#define x64_sse_mulps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x59)
+
+#define x64_sse_mulps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x59, size)
+
+#define x64_sse_subps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5c)
+
+#define x64_sse_subps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x5c, size)
+
+#define x64_sse_maxps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5f)
+
+#define x64_sse_minps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x5d)
+
+#define x64_sse_cmpps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xc2, (imm))
+
+#define x64_sse_andps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x54)
+
+#define x64_sse_andnps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x55)
+
+#define x64_sse_orps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x56)
+
+#define x64_sse_xorps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x57)
+
+#define x64_sse_sqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x51)
+
+#define x64_sse_rsqrtps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x52)
+
+#define x64_sse_rcpps_reg_reg(inst,dreg,reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x53)
+
+#define x64_sse_addsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0xd0)
+
+#define x64_sse_haddps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7c)
+
+#define x64_sse_hsubps_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x7d)
+
+#define x64_sse_movshdup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x16)
+
+#define x64_sse_movsldup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf3, 0x0f, 0x12)
+
+
+#define x64_sse_pshufhw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf3, 0x0f, 0x70, (imm))
+
+#define x64_sse_pshuflw_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0xf2, 0x0f, 0x70, (imm))
+
+#define x64_sse_pshufd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0x70, (imm))
+
+#define x64_sse_shufps_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_op2_imm((inst), (dreg), (reg), 0x0f, 0xC6, (imm))
+
+#define x64_sse_shufpd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xC6, (imm))
+
+
+#define x64_sse_addpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x58)
+
+#define x64_sse_divpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5e)
+
+#define x64_sse_mulpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x59)
+
+#define x64_sse_subpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5c)
+
+#define x64_sse_maxpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5f)
+
+#define x64_sse_minpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x5d)
+
+#define x64_sse_cmppd_reg_reg_imm(inst,dreg,reg,imm) emit_sse_reg_reg_imm((inst), (dreg), (reg), 0x66, 0x0f, 0xc2, (imm))
+
+#define x64_sse_andpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x54)
+
+#define x64_sse_andnpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x55)
+
+#define x64_sse_orpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x56)
+
+#define x64_sse_sqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x51)
+
+/* Note: SSE2 has no rsqrtpd/rcppd; the reciprocal approximation instructions are
+ * single-precision only, so 66 0f 52 and 66 0f 53 are invalid encodings. These two
+ * defines are kept for completeness but should not be used. */
+#define x64_sse_rsqrtpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x52)
+
+#define x64_sse_rcppd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x53)
+
+#define x64_sse_addsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd0)
+
+#define x64_sse_haddpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7c)
+
+#define x64_sse_hsubpd_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x7d)
+
+#define x64_sse_movddup_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xf2, 0x0f, 0x12)
+
+
+#define x64_sse_pmovmskb_reg_reg(inst,dreg,reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd7)
+
+
+#define x64_sse_pand_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdb)
+
+#define x64_sse_por_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xeb)
+
+#define x64_sse_pxor_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xef)
+
+
+#define x64_sse_paddb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfc)
+
+#define x64_sse_paddw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfd)
+
+#define x64_sse_paddd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfe)
+
+#define x64_sse_paddq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd4)
+
+
+#define x64_sse_psubb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf8)
+
+#define x64_sse_psubw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf9)
+
+#define x64_sse_psubd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfa)
+
+#define x64_sse_psubq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xfb)
+
+
+#define x64_sse_pmaxub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xde)
+
+#define x64_sse_pmaxuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3e)
+
+#define x64_sse_pmaxud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3f)
+
+
+#define x64_sse_pmaxsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3c)
+
+#define x64_sse_pmaxsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xee)
+
+#define x64_sse_pmaxsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3d)
+
+
+#define x64_sse_pavgb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe0)
+
+#define x64_sse_pavgw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define x64_sse_pminub_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xda)
+
+#define x64_sse_pminuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3a)
+
+#define x64_sse_pminud_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x3b)
+
+
+#define x64_sse_pminsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x38)
+
+#define x64_sse_pminsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xea)
+
+#define x64_sse_pminsd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x39)
+
+
+#define x64_sse_pcmpeqb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x74)
+
+#define x64_sse_pcmpeqw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x75)
+
+#define x64_sse_pcmpeqd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x76)
+
+#define x64_sse_pcmpeqq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x29)
+
+
+#define x64_sse_pcmpgtb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x64)
+
+#define x64_sse_pcmpgtw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x65)
+
+#define x64_sse_pcmpgtd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x66)
+
+#define x64_sse_pcmpgtq_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x37)
+
+
+#define x64_sse_psadbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf6)
+
+
+#define x64_sse_punpcklbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x60)
+
+#define x64_sse_punpcklwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x61)
+
+#define x64_sse_punpckldq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x62)
+
+#define x64_sse_punpcklqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6c)
+
+#define x64_sse_unpcklpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x14)
+
+#define x64_sse_unpcklps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x14)
+
+
+#define x64_sse_punpckhbw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x68)
+
+#define x64_sse_punpckhwd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x69)
+
+#define x64_sse_punpckhdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6a)
+
+#define x64_sse_punpckhqdq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6d)
+
+#define x64_sse_unpckhpd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x15)
+
+#define x64_sse_unpckhps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x15)
+
+
+#define x64_sse_packsswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x63)
+
+#define x64_sse_packssdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6b)
+
+#define x64_sse_packuswb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x67)
+
+#define x64_sse_packusdw_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x2b)
+
+
+#define x64_sse_paddusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdc)
+
+#define x64_sse_psubusb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd8)
+
+#define x64_sse_paddusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xdd)
+
+#define x64_sse_psubusw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd9)
+
+
+#define x64_sse_paddsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xec)
+
+#define x64_sse_psubsb_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe8)
+
+#define x64_sse_paddsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xed)
+
+#define x64_sse_psubsw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe9)
+
+
+#define x64_sse_pmullw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd5)
+
+#define x64_sse_pmulld_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op4((inst), (dreg), (reg), 0x66, 0x0f, 0x38, 0x40)
+
+#define x64_sse_pmuludq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf4)
+
+#define x64_sse_pmulhuw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe4)
+
+#define x64_sse_pmulhw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe5)
+
+
+#define x64_sse_psrlw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psrlw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd1)
+
+
+#define x64_sse_psraw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psraw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe1)
+
+
+#define x64_sse_psllw_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x71, (imm))
+
+#define x64_sse_psllw_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf1)
+
+
+#define x64_sse_psrld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_psrld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd2)
+
+
+#define x64_sse_psrad_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_psrad_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe2)
+
+
+#define x64_sse_pslld_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x72, (imm))
+
+#define x64_sse_pslld_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf2)
+
+
+#define x64_sse_psrlq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psrlq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xd3)
+
+
+/* Note: SSE2 has no psraq; 66 0f 73 /4 is undefined and 66 0f e3 actually encodes
+ * pavgw. These two defines are kept for completeness but should not be used. */
+#define x64_sse_psraq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SAR, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psraq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xe3)
+
+
+#define x64_sse_psllq_reg_imm(inst, reg, imm) emit_sse_reg_reg_imm((inst), X86_SSE_SHL, (reg), 0x66, 0x0f, 0x73, (imm))
+
+#define x64_sse_psllq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0xf3)
+
+
+#define x64_sse_cvtdq2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0xE6)
+
+#define x64_sse_cvtdq2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5B)
+
+#define x64_sse_cvtpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF2, 0x0F, 0xE6)
+
+#define x64_sse_cvtpd2ps_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5A)
+
+#define x64_sse_cvtps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0x5B)
+
+#define x64_sse_cvtps2pd_reg_reg(inst, dreg, reg) emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0F, 0x5A)
+
+#define x64_sse_cvttpd2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0F, 0xE6)
+
+#define x64_sse_cvttps2dq_reg_reg(inst, dreg, reg) emit_sse_reg_reg((inst), (dreg), (reg), 0xF3, 0x0F, 0x5B)
+
+
+#define x64_movd_xreg_reg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (dreg), (sreg), 0x66, 0x0f, 0x6e, (size))
+
+#define x64_movd_reg_xreg_size(inst,dreg,sreg,size) emit_sse_reg_reg_size((inst), (sreg), (dreg), 0x66, 0x0f, 0x7e, (size))
+
+#define x64_movd_xreg_membase(inst,dreg,basereg,disp) emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6e)
+
+#define x64_sse_movhlps_reg_reg(inst, dreg, sreg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x12)
+
+#define x64_sse_movhlps_reg_reg_size(inst, dreg, sreg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x12, size)
+
+#define x64_sse_movlhps_reg_reg(inst, dreg, sreg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (sreg), 0x0f, 0x16)
+
+#define x64_sse_movlhps_reg_reg_size(inst, dreg, sreg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (sreg), 0x0f, 0x16, size)
+
+#define x64_sse_movups_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x11)
+
+#define x64_sse_movups_membase_reg_size(inst, basereg, disp, reg, size) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x11, (size))
+
+#define x64_sse_movups_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x10)
+
+#define x64_sse_movups_reg_membase_size(inst, dreg, basereg, disp, size) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x10, (size))
+
+#define x64_sse_movaps_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x29)
+
+#define x64_sse_movaps_membase_reg_size(inst, basereg, disp, reg, size) \
+ emit_sse_membase_reg_op2_size((inst), (basereg), (disp), (reg), 0x0f, 0x29, (size))
+
+#define x64_sse_movaps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \
+	emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x29)
+
+#define x64_sse_movaps_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (dreg), (basereg), (disp), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_membase_size(inst, dreg, basereg, disp, size) \
+ emit_sse_reg_membase_op2_size((inst), (dreg), (basereg), (disp), 0x0f, 0x28, (size))
+
+#define x64_sse_movaps_reg_memindex(inst, dreg, basereg, disp, indexreg, shift) \
+	emit_sse_reg_memindex_op2((inst), (dreg), (basereg), (disp), (indexreg), (shift), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg_op2((inst), (dreg), (reg), 0x0f, 0x28)
+
+#define x64_sse_movaps_reg_reg_size(inst, dreg, reg, size) \
+ emit_sse_reg_reg_op2_size((inst), (dreg), (reg), 0x0f, 0x28, size)
+
+#define x64_sse_movntps_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg_op2((inst), (basereg), (disp), (reg), 0x0f, 0x2b)
+
+#define x64_sse_movntps_memindex_reg(inst, basereg, disp, indexreg, shift, reg) \
+ emit_sse_memindex_reg_op2((inst), (basereg), (disp), (indexreg), (shift), (reg), 0x0f, 0x2b)
+
+#define x64_sse_prefetch_reg_membase(inst, arg, basereg, disp) \
+ emit_sse_reg_membase_op2((inst), (arg), (basereg), (disp), 0x0f, 0x18)
+
+#define x64_sse_movdqa_membase_reg(inst, basereg, disp, reg) \
+ emit_sse_membase_reg((inst), (basereg), (disp), (reg), 0x66, 0x0f, 0x7f)
+
+#define x64_sse_movdqa_reg_membase(inst, dreg, basereg, disp) \
+ emit_sse_reg_membase((inst), (dreg), (basereg), (disp), 0x66, 0x0f, 0x6f)
+
+#define x64_sse_movdqa_reg_reg(inst, dreg, reg) \
+ emit_sse_reg_reg((inst), (dreg), (reg), 0x66, 0x0f, 0x6f)
+
+/* Generated from x86-codegen.h */
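+
+/* Editor's note: each generated wrapper emits a REX prefix sized by `size` and
+ * then delegates to the corresponding 32-bit x86 macro; where that macro only
+ * understands operand sizes up to 4, a 64-bit request is clamped with
+ * `(size) == 8 ? 4 : (size)` because REX.W already selects the 64-bit form. */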
+
+#define x64_breakpoint_size(inst,size) do { x86_breakpoint(inst); } while (0)
+#define x64_cld_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_cld(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosb(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosl(inst); x64_codegen_post(inst); } while (0)
+#define x64_stosd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_stosd(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsb_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsb(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsl_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsl(inst); x64_codegen_post(inst); } while (0)
+#define x64_movsd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_movsd(inst); x64_codegen_post(inst); } while (0)
+#define x64_prefix_size(inst,p,size) do { x86_prefix((inst), p); } while (0)
+#define x64_rdtsc_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_rdtsc(inst); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmpxchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmpxchg_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_cmpxchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xchg_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xchg_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_xchg_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xchg_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_inc_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_inc_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_inc_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_inc_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+//#define x64_inc_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_inc_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_dec_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_dec_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_dec_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_dec_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+//#define x64_dec_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_dec_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_not_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_not_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_not_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_not_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_not_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_not_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_neg_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_neg_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_neg_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_neg_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_neg_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_neg_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_nop_size(inst,size) do { x64_codegen_pre(inst); x86_nop(inst); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_alu_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase8_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_alu_membase8_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_alu_mem_reg_size(inst,opc,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_mem_reg((inst),(opc),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_membase_reg((inst),(opc),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_reg_size(inst,opc,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg_reg((inst),(opc),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_alu_reg8_reg8((inst),(opc),((dreg)&0x7),((reg)&0x7),(is_dreg_h),(is_reg_h)); x64_codegen_post(inst); } while (0)
+#define x64_alu_reg_mem_size(inst,opc,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_alu_reg_mem((inst),(opc),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_alu_reg_membase((inst),(opc),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_test_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_test_mem_imm((inst),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_test_membase_imm((inst),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_test_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_test_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_test_mem_reg((inst),(mem),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_test_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_test_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_imm_size(inst,opc,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg_imm((inst),(opc),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_mem_imm_size(inst,opc,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem_imm((inst),(opc),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase_imm((inst),(opc),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_shift_reg_size(inst,opc,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_shift_reg((inst),(opc),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shift_mem_size(inst,opc,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_shift_mem((inst),(opc),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_shift_membase_size(inst,opc,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_shift_membase((inst),(opc),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shrd_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shrd_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_shld_reg_imm_size(inst,dreg,reg,shamt,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_shld_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(shamt)); x64_codegen_post(inst); } while (0)
+#define x64_mul_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mul_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mul_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mul_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mul_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg((inst),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_imul_reg_reg_imm((inst),((dreg)&0x7),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_mem_imm_size(inst,reg,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_imul_reg_mem_imm((inst),((reg)&0x7),(mem),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_imul_reg_membase_imm((inst),((reg)&0x7),((basereg)&0x7),(disp),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_div_reg_size(inst,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_div_reg((inst),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_mem_size(inst,mem,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_div_mem((inst),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_div_membase_size(inst,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_div_membase((inst),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_mov_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_mem_reg((inst),(mem),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_regp_reg_size(inst,regp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(regp),0,(reg)); x86_mov_regp_reg((inst),(regp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_memindex_reg((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_mov_reg_reg((inst),((dreg)&0x7),((reg)&0x7),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_mem((inst),((reg)&0x7),(mem),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_mov_reg_membase((inst),((reg)&0x7),((basereg)&0x7),(disp),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_mov_reg_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_clear_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_clear_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_reg_imm_size(inst,reg,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_mov_reg_imm((inst),((reg)&0x7),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_mov_mem_imm_size(inst,mem,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_mov_mem_imm((inst),(mem),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+//#define x64_mov_membase_imm_size(inst,basereg,disp,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_mov_membase_imm((inst),((basereg)&0x7),(disp),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_mov_memindex_imm((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(imm),(size) == 8 ? 4 : (size)); x64_codegen_post(inst); } while (0)
+#define x64_lea_mem_size(inst,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_lea_mem((inst),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_lea_membase_size(inst,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_lea_membase((inst),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),(indexreg),(basereg)); x86_lea_memindex((inst),((reg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0)
+#define x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_widen_reg((inst),((dreg)&0x7),((reg)&0x7),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,0); x86_widen_mem((inst),((dreg)&0x7),(mem),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(basereg)); x86_widen_membase((inst),((dreg)&0x7),((basereg)&0x7),(disp),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),(indexreg),(basereg)); x86_widen_memindex((inst),((dreg)&0x7),((basereg)&0x7),(disp),((indexreg)&0x7),(shift),(is_signed),(is_half)); x64_codegen_post(inst); } while (0)
+#define x64_cdq_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_cdq(inst); x64_codegen_post(inst); } while (0)
+#define x64_wait_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_wait(inst); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_mem_size(inst,opc,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_mem((inst),(opc),(mem),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_size(inst,opc,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op((inst),(opc),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fp_op_reg_size(inst,opc,index,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fp_op_reg((inst),(opc),(index),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fp_int_op_membase((inst),(opc),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0)
+#define x64_fstp_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fstp((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcompp(inst); x64_codegen_post(inst); } while (0)
+#define x64_fucompp_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucompp(inst); x64_codegen_post(inst); } while (0)
+#define x64_fnstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstsw(inst); x64_codegen_post(inst); } while (0)
+#define x64_fnstcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fnstcw((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fnstcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fnstcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fldcw_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldcw((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fldcw_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fldcw_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fchs_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fchs(inst); x64_codegen_post(inst); } while (0)
+#define x64_frem_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_frem(inst); x64_codegen_post(inst); } while (0)
+#define x64_fxch_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fxch((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomi((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fcomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fcomip((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fucomi_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomi((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fucomip_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fucomip((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fld_size(inst,mem,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld((inst),(mem),(is_double)); x64_codegen_post(inst); } while (0)
+//#define x64_fld_membase_size(inst,basereg,disp,is_double,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fld_membase((inst),((basereg)&0x7),(disp),(is_double)); x64_codegen_post(inst); } while (0)
+#define x64_fld80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld80_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fld80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_fld80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fild_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fild((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fild_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fild_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fld_reg_size(inst,index,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld_reg((inst),(index)); x64_codegen_post(inst); } while (0)
+#define x64_fldz_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldz(inst); x64_codegen_post(inst); } while (0)
+#define x64_fld1_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fld1(inst); x64_codegen_post(inst); } while (0)
+#define x64_fldpi_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fldpi(inst); x64_codegen_post(inst); } while (0)
+#define x64_fst_size(inst,mem,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst((inst),(mem),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst_membase((inst),((basereg)&0x7),(disp),(is_double),(pop_stack)); x64_codegen_post(inst); } while (0)
+#define x64_fst80_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fst80_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_fst80_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fst80_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_fist_pop_size(inst,mem,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_fist_pop((inst),(mem),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fist_pop_membase_size(inst,basereg,disp,is_long,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_pop_membase((inst),((basereg)&0x7),(disp),(is_long)); x64_codegen_post(inst); } while (0)
+#define x64_fstsw_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_fstsw(inst); x64_codegen_post(inst); } while (0)
+#define x64_fist_membase_size(inst,basereg,disp,is_int,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_fist_membase((inst),((basereg)&0x7),(disp),(is_int)); x64_codegen_post(inst); } while (0)
+//#define x64_push_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_push_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_push_regp_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_push_regp((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_push_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_push_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+//#define x64_push_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_push_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_push_memindex_size(inst,basereg,disp,indexreg,shift,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,(indexreg),(basereg)); x86_push_memindex((inst),((basereg)&0x7),(disp),((indexreg)&0x7),(shift)); x64_codegen_post(inst); } while (0)
+#define x64_push_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_push_imm((inst),(imm)); x64_codegen_post(inst); } while (0)
+//#define x64_pop_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_pop_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_pop_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pop_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_pop_membase_size(inst,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_pop_membase((inst),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_pushad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushad(inst); x64_codegen_post(inst); } while (0)
+#define x64_pushfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_pushfd(inst); x64_codegen_post(inst); } while (0)
+#define x64_popad_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popad(inst); x64_codegen_post(inst); } while (0)
+#define x64_popfd_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_popfd(inst); x64_codegen_post(inst); } while (0)
+#define x64_loop_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loop((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_loope_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loope((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_loopne_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_loopne((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_jump32_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump32((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_jump8_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_jump8((inst),(imm)); x64_codegen_post(inst); } while (0)
+
+#if !defined( __native_client_codegen__ )
+/* For Native Client these are defined earlier in the file, so they can be used in other macros */
+#define x64_jump_reg_size(inst,reg,size) do { x64_emit_rex ((inst),0,0,0,(reg)); x86_jump_reg((inst),((reg)&0x7)); } while (0)
+#define x64_jump_mem_size(inst,mem,size) do { x64_emit_rex ((inst),(size),0,0,0); x86_jump_mem((inst),(mem)); } while (0)
+#endif
+
+#define x64_jump_disp_size(inst,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,0); x86_jump_disp((inst),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_branch8_size(inst,cond,imm,is_signed,size) do { x86_branch8((inst),(cond),(imm),(is_signed)); } while (0)
+#define x64_branch32_size(inst,cond,imm,is_signed,size) do { x86_branch32((inst),(cond),(imm),(is_signed)); } while (0)
+#define x64_branch_size_body(inst,cond,target,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch((inst),(cond),(target),(is_signed)); x64_codegen_post(inst); } while (0)
+
+#if defined(__native_client_codegen__)
+#define x64_branch_size(inst,cond,target,is_signed,size) \
+ do { \
+		/* x64_branch_size_body is emitted twice in case the */ \
+		/* code is relocated by x64_codegen_post */ \
+ uint8_t* branch_start; \
+ x64_codegen_pre(inst); \
+ x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+ inst = x64_codegen_post(inst); \
+ branch_start = inst; \
+ x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); \
+ mono_x64_patch(branch_start, (target)); \
+ } while (0)
+#else
+#define x64_branch_size(inst,cond,target,is_signed,size) do { x64_branch_size_body((inst),(cond),(target),(is_signed),(size)); } while (0)
+#endif /* __native_client_codegen__ */
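+
+/* Illustrative sketch of how these branch macros are typically used
+ * (assuming an unsigned char *code cursor into an executable buffer;
+ * x64_branch itself is defined further down in this header):
+ *
+ *   unsigned char *loop_top = code;
+ *   ...emit the loop body...
+ *   x64_branch (code, X86_CC_NE, loop_top, 0);   // jne loop_top
+ *
+ * On the default (non-NaCl) path this expands to x64_branch_size_body,
+ * i.e. a REX prefix followed by the plain x86 branch encoding.
+ */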
+
+#define x64_branch_disp_size(inst,cond,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_branch_disp((inst),(cond),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_reg_size(inst,cond,reg,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex((inst),1,0,0,(reg)); x86_set_reg((inst),(cond),((reg)&0x7),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_mem_size(inst,cond,mem,is_signed,size) do { x64_codegen_pre(inst); x86_set_mem((inst),(cond),(mem),(is_signed)); x64_codegen_post(inst); } while (0)
+#define x64_set_membase_size(inst,cond,basereg,disp,is_signed,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),0,0,0,(basereg)); x86_set_membase((inst),(cond),((basereg)&0x7),(disp),(is_signed)); x64_codegen_post(inst); } while (0)
+//#define x64_call_reg_size(inst,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_call_reg((inst),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_call_mem_size(inst,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_call_mem((inst),(mem)); x64_codegen_post(inst); } while (0)
+
+#if defined(__native_client_codegen__)
+/* Size is ignored for Native Client calls; jump targets are restricted to 32 bits */
+#define x64_call_imm_size(inst,disp,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ x64_call_sequence_pre((inst)); \
+ x86_call_imm((inst),(disp)); \
+ x64_call_sequence_post((inst)); \
+ x64_codegen_post((inst)); \
+ } while (0)
+
+/* x86_call_code is called twice below: first so we can get the size of the */
+/* call sequence, and again so the exact offset from "inst" is used, since */
+/* the sequence could have been moved by x64_call_sequence_post. */
+/* Size is ignored for Native Client jumps; jump targets are restricted to 32 bits */
+#define x64_call_code_size(inst,target,size) \
+ do { \
+ x64_codegen_pre((inst)); \
+ uint8_t* adjusted_start; \
+ uint8_t* call_start; \
+ x64_call_sequence_pre((inst)); \
+ x86_call_code((inst),(target)); \
+ adjusted_start = x64_call_sequence_post((inst)); \
+ call_start = adjusted_start; \
+ x86_call_code(adjusted_start, (target)); \
+ x64_codegen_post((inst)); \
+ mono_x64_patch(call_start, (target)); \
+ } while (0)
+
+#else
+
+#define x64_call_imm_size(inst,disp,size) do { x86_call_imm((inst),(disp)); } while (0)
+#define x64_call_code_size(inst,target,size) do { x86_call_code((inst),(target)); } while (0)
+
+#endif /*__native_client_codegen__*/
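+
+/* Illustrative sketch of a direct call that is patched afterwards,
+ * using mono_x64_patch, the same helper the NaCl variant above relies
+ * on (assumes the final target is within rel32 range):
+ *
+ *   unsigned char *call_site = code;
+ *   x64_call_code (code, 0);             // placeholder target
+ *   ...
+ *   mono_x64_patch (call_site, callee);  // retarget to the real callee
+ */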
+
+//#define x64_ret_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret(inst); x64_codegen_post(inst); } while (0)
+#define x64_ret_imm_size(inst,imm,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_ret_imm((inst),(imm)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_cmov_reg((inst),(cond),(is_signed),((dreg)&0x7),((reg)&0x7)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_mem_size(inst,cond,is_signed,reg,mem,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_cmov_mem((inst),(cond),(is_signed),((reg)&0x7),(mem)); x64_codegen_post(inst); } while (0)
+#define x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(basereg)); x86_cmov_membase((inst),(cond),(is_signed),((reg)&0x7),((basereg)&0x7),(disp)); x64_codegen_post(inst); } while (0)
+#define x64_enter_size(inst,framesize,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_enter((inst),(framesize)); x64_codegen_post(inst); } while (0)
+//#define x64_leave_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_leave(inst); x64_codegen_post(inst); } while (0)
+#define x64_sahf_size(inst,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_sahf(inst); x64_codegen_post(inst); } while (0)
+#define x64_fsin_size(inst,size) do { x64_codegen_pre(inst); x86_fsin(inst); x64_codegen_post(inst); } while (0)
+#define x64_fcos_size(inst,size) do { x64_codegen_pre(inst); x86_fcos(inst); x64_codegen_post(inst); } while (0)
+#define x64_fabs_size(inst,size) do { x64_codegen_pre(inst); x86_fabs(inst); x64_codegen_post(inst); } while (0)
+#define x64_ftst_size(inst,size) do { x64_codegen_pre(inst); x86_ftst(inst); x64_codegen_post(inst); } while (0)
+#define x64_fxam_size(inst,size) do { x64_codegen_pre(inst); x86_fxam(inst); x64_codegen_post(inst); } while (0)
+#define x64_fpatan_size(inst,size) do { x64_codegen_pre(inst); x86_fpatan(inst); x64_codegen_post(inst); } while (0)
+#define x64_fprem_size(inst,size) do { x64_codegen_pre(inst); x86_fprem(inst); x64_codegen_post(inst); } while (0)
+#define x64_fprem1_size(inst,size) do { x64_codegen_pre(inst); x86_fprem1(inst); x64_codegen_post(inst); } while (0)
+#define x64_frndint_size(inst,size) do { x64_codegen_pre(inst); x86_frndint(inst); x64_codegen_post(inst); } while (0)
+#define x64_fsqrt_size(inst,size) do { x64_codegen_pre(inst); x86_fsqrt(inst); x64_codegen_post(inst); } while (0)
+#define x64_fptan_size(inst,size) do { x64_codegen_pre(inst); x86_fptan(inst); x64_codegen_post(inst); } while (0)
+//#define x64_padding_size(inst,size) do { x64_codegen_pre(inst); x86_padding((inst),(size)); x64_codegen_post(inst); } while (0)
+#define x64_prolog_size(inst,frame_size,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_prolog((inst),(frame_size),(reg_mask)); x64_codegen_post(inst); } while (0)
+#define x64_epilog_size(inst,reg_mask,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,0); x86_epilog((inst),(reg_mask)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_reg_reg_size(inst,dreg,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(dreg),0,(reg)); x86_xadd_reg_reg ((inst), (dreg), (reg), (size)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_mem_reg_size(inst,mem,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),0,0,(reg)); x86_xadd_mem_reg((inst),(mem),((reg)&0x7), (size)); x64_codegen_post(inst); } while (0)
+#define x64_xadd_membase_reg_size(inst,basereg,disp,reg,size) do { x64_codegen_pre(inst); x64_emit_rex ((inst),(size),(reg),0,(basereg)); x86_xadd_membase_reg((inst),((basereg)&0x7),(disp),((reg)&0x7),(size)); x64_codegen_post(inst); } while (0)
+
+#define x64_breakpoint(inst) x64_breakpoint_size(inst,8)
+#define x64_cld(inst) x64_cld_size(inst,8)
+#define x64_stosb(inst) x64_stosb_size(inst,8)
+#define x64_stosl(inst) x64_stosl_size(inst,8)
+#define x64_stosd(inst) x64_stosd_size(inst,8)
+#define x64_movsb(inst) x64_movsb_size(inst,8)
+#define x64_movsl(inst) x64_movsl_size(inst,8)
+#define x64_movsd(inst) x64_movsd_size(inst,8)
+#define x64_prefix(inst,p) x64_prefix_size(inst,p,8)
+#define x64_rdtsc(inst) x64_rdtsc_size(inst,8)
+#define x64_cmpxchg_reg_reg(inst,dreg,reg) x64_cmpxchg_reg_reg_size(inst,dreg,reg,8)
+#define x64_cmpxchg_mem_reg(inst,mem,reg) x64_cmpxchg_mem_reg_size(inst,mem,reg,8)
+#define x64_cmpxchg_membase_reg(inst,basereg,disp,reg) x64_cmpxchg_membase_reg_size(inst,basereg,disp,reg,8)
+#define x64_xchg_reg_reg(inst,dreg,reg,size) x64_xchg_reg_reg_size(inst,dreg,reg,size)
+#define x64_xchg_mem_reg(inst,mem,reg,size) x64_xchg_mem_reg_size(inst,mem,reg,size)
+#define x64_xchg_membase_reg(inst,basereg,disp,reg,size) x64_xchg_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_xadd_reg_reg(inst,dreg,reg,size) x64_xadd_reg_reg_size(inst,dreg,reg,size)
+#define x64_xadd_mem_reg(inst,mem,reg,size) x64_xadd_mem_reg_size(inst,mem,reg,size)
+#define x64_xadd_membase_reg(inst,basereg,disp,reg,size) x64_xadd_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_inc_mem(inst,mem) x64_inc_mem_size(inst,mem,8)
+#define x64_inc_membase(inst,basereg,disp) x64_inc_membase_size(inst,basereg,disp,8)
+#define x64_inc_reg(inst,reg) x64_inc_reg_size(inst,reg,8)
+#define x64_dec_mem(inst,mem) x64_dec_mem_size(inst,mem,8)
+#define x64_dec_membase(inst,basereg,disp) x64_dec_membase_size(inst,basereg,disp,8)
+#define x64_dec_reg(inst,reg) x64_dec_reg_size(inst,reg,8)
+#define x64_not_mem(inst,mem) x64_not_mem_size(inst,mem,8)
+#define x64_not_membase(inst,basereg,disp) x64_not_membase_size(inst,basereg,disp,8)
+#define x64_not_reg(inst,reg) x64_not_reg_size(inst,reg,8)
+#define x64_neg_mem(inst,mem) x64_neg_mem_size(inst,mem,8)
+#define x64_neg_membase(inst,basereg,disp) x64_neg_membase_size(inst,basereg,disp,8)
+#define x64_neg_reg(inst,reg) x64_neg_reg_size(inst,reg,8)
+#define x64_nop(inst) x64_nop_size(inst,8)
+//#define x64_alu_reg_imm(inst,opc,reg,imm) x64_alu_reg_imm_size(inst,opc,reg,imm,8)
+#define x64_alu_mem_imm(inst,opc,mem,imm) x64_alu_mem_imm_size(inst,opc,mem,imm,8)
+#define x64_alu_membase_imm(inst,opc,basereg,disp,imm) x64_alu_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define x64_alu_mem_reg(inst,opc,mem,reg) x64_alu_mem_reg_size(inst,opc,mem,reg,8)
+#define x64_alu_membase_reg(inst,opc,basereg,disp,reg) x64_alu_membase_reg_size(inst,opc,basereg,disp,reg,8)
+//#define x64_alu_reg_reg(inst,opc,dreg,reg) x64_alu_reg_reg_size(inst,opc,dreg,reg,8)
+#define x64_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) x64_alu_reg8_reg8_size(inst,opc,dreg,reg,is_dreg_h,is_reg_h,8)
+#define x64_alu_reg_mem(inst,opc,reg,mem) x64_alu_reg_mem_size(inst,opc,reg,mem,8)
+#define x64_alu_reg_membase(inst,opc,reg,basereg,disp) x64_alu_reg_membase_size(inst,opc,reg,basereg,disp,8)
+#define x64_test_reg_imm(inst,reg,imm) x64_test_reg_imm_size(inst,reg,imm,8)
+#define x64_test_mem_imm(inst,mem,imm) x64_test_mem_imm_size(inst,mem,imm,8)
+#define x64_test_membase_imm(inst,basereg,disp,imm) x64_test_membase_imm_size(inst,basereg,disp,imm,8)
+#define x64_test_reg_reg(inst,dreg,reg) x64_test_reg_reg_size(inst,dreg,reg,8)
+#define x64_test_mem_reg(inst,mem,reg) x64_test_mem_reg_size(inst,mem,reg,8)
+#define x64_test_membase_reg(inst,basereg,disp,reg) x64_test_membase_reg_size(inst,basereg,disp,reg,8)
+#define x64_shift_reg_imm(inst,opc,reg,imm) x64_shift_reg_imm_size(inst,opc,reg,imm,8)
+#define x64_shift_mem_imm(inst,opc,mem,imm) x64_shift_mem_imm_size(inst,opc,mem,imm,8)
+#define x64_shift_membase_imm(inst,opc,basereg,disp,imm) x64_shift_membase_imm_size(inst,opc,basereg,disp,imm,8)
+#define x64_shift_reg(inst,opc,reg) x64_shift_reg_size(inst,opc,reg,8)
+#define x64_shift_mem(inst,opc,mem) x64_shift_mem_size(inst,opc,mem,8)
+#define x64_shift_membase(inst,opc,basereg,disp) x64_shift_membase_size(inst,opc,basereg,disp,8)
+#define x64_shrd_reg(inst,dreg,reg) x64_shrd_reg_size(inst,dreg,reg,8)
+#define x64_shrd_reg_imm(inst,dreg,reg,shamt) x64_shrd_reg_imm_size(inst,dreg,reg,shamt,8)
+#define x64_shld_reg(inst,dreg,reg) x64_shld_reg_size(inst,dreg,reg,8)
+#define x64_shld_reg_imm(inst,dreg,reg,shamt) x64_shld_reg_imm_size(inst,dreg,reg,shamt,8)
+#define x64_mul_reg(inst,reg,is_signed) x64_mul_reg_size(inst,reg,is_signed,8)
+#define x64_mul_mem(inst,mem,is_signed) x64_mul_mem_size(inst,mem,is_signed,8)
+#define x64_mul_membase(inst,basereg,disp,is_signed) x64_mul_membase_size(inst,basereg,disp,is_signed,8)
+#define x64_imul_reg_reg(inst,dreg,reg) x64_imul_reg_reg_size(inst,dreg,reg,8)
+#define x64_imul_reg_mem(inst,reg,mem) x64_imul_reg_mem_size(inst,reg,mem,8)
+#define x64_imul_reg_membase(inst,reg,basereg,disp) x64_imul_reg_membase_size(inst,reg,basereg,disp,8)
+#define x64_imul_reg_reg_imm(inst,dreg,reg,imm) x64_imul_reg_reg_imm_size(inst,dreg,reg,imm,8)
+#define x64_imul_reg_mem_imm(inst,reg,mem,imm) x64_imul_reg_mem_imm_size(inst,reg,mem,imm,8)
+#define x64_imul_reg_membase_imm(inst,reg,basereg,disp,imm) x64_imul_reg_membase_imm_size(inst,reg,basereg,disp,imm,8)
+#define x64_div_reg(inst,reg,is_signed) x64_div_reg_size(inst,reg,is_signed,8)
+#define x64_div_mem(inst,mem,is_signed) x64_div_mem_size(inst,mem,is_signed,8)
+#define x64_div_membase(inst,basereg,disp,is_signed) x64_div_membase_size(inst,basereg,disp,is_signed,8)
+//#define x64_mov_mem_reg(inst,mem,reg,size) x64_mov_mem_reg_size(inst,mem,reg,size)
+//#define x64_mov_regp_reg(inst,regp,reg,size) x64_mov_regp_reg_size(inst,regp,reg,size)
+//#define x64_mov_membase_reg(inst,basereg,disp,reg,size) x64_mov_membase_reg_size(inst,basereg,disp,reg,size)
+#define x64_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) x64_mov_memindex_reg_size(inst,basereg,disp,indexreg,shift,reg,size)
+//#define x64_mov_reg_reg(inst,dreg,reg,size) x64_mov_reg_reg_size(inst,dreg,reg,size)
+//#define x64_mov_reg_mem(inst,reg,mem,size) x64_mov_reg_mem_size(inst,reg,mem,size)
+//#define x64_mov_reg_membase(inst,reg,basereg,disp,size) x64_mov_reg_membase_size(inst,reg,basereg,disp,size)
+#define x64_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) x64_mov_reg_memindex_size(inst,reg,basereg,disp,indexreg,shift,size)
+#define x64_clear_reg(inst,reg) x64_clear_reg_size(inst,reg,8)
+//#define x64_mov_reg_imm(inst,reg,imm) x64_mov_reg_imm_size(inst,reg,imm,8)
+#define x64_mov_mem_imm(inst,mem,imm,size) x64_mov_mem_imm_size(inst,mem,imm,size)
+//#define x64_mov_membase_imm(inst,basereg,disp,imm,size) x64_mov_membase_imm_size(inst,basereg,disp,imm,size)
+#define x64_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) x64_mov_memindex_imm_size(inst,basereg,disp,indexreg,shift,imm,size)
+#define x64_lea_mem(inst,reg,mem) x64_lea_mem_size(inst,reg,mem,8)
+//#define x64_lea_membase(inst,reg,basereg,disp) x64_lea_membase_size(inst,reg,basereg,disp,8)
+#define x64_lea_memindex(inst,reg,basereg,disp,indexreg,shift) x64_lea_memindex_size(inst,reg,basereg,disp,indexreg,shift,8)
+#define x64_widen_reg(inst,dreg,reg,is_signed,is_half) x64_widen_reg_size(inst,dreg,reg,is_signed,is_half,8)
+#define x64_widen_mem(inst,dreg,mem,is_signed,is_half) x64_widen_mem_size(inst,dreg,mem,is_signed,is_half,8)
+#define x64_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) x64_widen_membase_size(inst,dreg,basereg,disp,is_signed,is_half,8)
+#define x64_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) x64_widen_memindex_size(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half,8)
+#define x64_cdq(inst) x64_cdq_size(inst,8)
+#define x64_wait(inst) x64_wait_size(inst,8)
+#define x64_fp_op_mem(inst,opc,mem,is_double) x64_fp_op_mem_size(inst,opc,mem,is_double,8)
+#define x64_fp_op_membase(inst,opc,basereg,disp,is_double) x64_fp_op_membase_size(inst,opc,basereg,disp,is_double,8)
+#define x64_fp_op(inst,opc,index) x64_fp_op_size(inst,opc,index,8)
+#define x64_fp_op_reg(inst,opc,index,pop_stack) x64_fp_op_reg_size(inst,opc,index,pop_stack,8)
+#define x64_fp_int_op_membase(inst,opc,basereg,disp,is_int) x64_fp_int_op_membase_size(inst,opc,basereg,disp,is_int,8)
+#define x64_fstp(inst,index) x64_fstp_size(inst,index,8)
+#define x64_fcompp(inst) x64_fcompp_size(inst,8)
+#define x64_fucompp(inst) x64_fucompp_size(inst,8)
+#define x64_fnstsw(inst) x64_fnstsw_size(inst,8)
+#define x64_fnstcw(inst,mem) x64_fnstcw_size(inst,mem,8)
+#define x64_fnstcw_membase(inst,basereg,disp) x64_fnstcw_membase_size(inst,basereg,disp,8)
+#define x64_fldcw(inst,mem) x64_fldcw_size(inst,mem,8)
+#define x64_fldcw_membase(inst,basereg,disp) x64_fldcw_membase_size(inst,basereg,disp,8)
+#define x64_fchs(inst) x64_fchs_size(inst,8)
+#define x64_frem(inst) x64_frem_size(inst,8)
+#define x64_fxch(inst,index) x64_fxch_size(inst,index,8)
+#define x64_fcomi(inst,index) x64_fcomi_size(inst,index,8)
+#define x64_fcomip(inst,index) x64_fcomip_size(inst,index,8)
+#define x64_fucomi(inst,index) x64_fucomi_size(inst,index,8)
+#define x64_fucomip(inst,index) x64_fucomip_size(inst,index,8)
+#define x64_fld(inst,mem,is_double) x64_fld_size(inst,mem,is_double,8)
+#define x64_fld_membase(inst,basereg,disp,is_double) x64_fld_membase_size(inst,basereg,disp,is_double,8)
+#define x64_fld80_mem(inst,mem) x64_fld80_mem_size(inst,mem,8)
+#define x64_fld80_membase(inst,basereg,disp) x64_fld80_membase_size(inst,basereg,disp,8)
+#define x64_fild(inst,mem,is_long) x64_fild_size(inst,mem,is_long,8)
+#define x64_fild_membase(inst,basereg,disp,is_long) x64_fild_membase_size(inst,basereg,disp,is_long,8)
+#define x64_fld_reg(inst,index) x64_fld_reg_size(inst,index,8)
+#define x64_fldz(inst) x64_fldz_size(inst,8)
+#define x64_fld1(inst) x64_fld1_size(inst,8)
+#define x64_fldpi(inst) x64_fldpi_size(inst,8)
+#define x64_fst(inst,mem,is_double,pop_stack) x64_fst_size(inst,mem,is_double,pop_stack,8)
+#define x64_fst_membase(inst,basereg,disp,is_double,pop_stack) x64_fst_membase_size(inst,basereg,disp,is_double,pop_stack,8)
+#define x64_fst80_mem(inst,mem) x64_fst80_mem_size(inst,mem,8)
+#define x64_fst80_membase(inst,basereg,disp) x64_fst80_membase_size(inst,basereg,disp,8)
+#define x64_fist_pop(inst,mem,is_long) x64_fist_pop_size(inst,mem,is_long,8)
+#define x64_fist_pop_membase(inst,basereg,disp,is_long) x64_fist_pop_membase_size(inst,basereg,disp,is_long,8)
+#define x64_fstsw(inst) x64_fstsw_size(inst,8)
+#define x64_fist_membase(inst,basereg,disp,is_int) x64_fist_membase_size(inst,basereg,disp,is_int,8)
+//#define x64_push_reg(inst,reg) x64_push_reg_size(inst,reg,8)
+#define x64_push_regp(inst,reg) x64_push_regp_size(inst,reg,8)
+#define x64_push_mem(inst,mem) x64_push_mem_size(inst,mem,8)
+//#define x64_push_membase(inst,basereg,disp) x64_push_membase_size(inst,basereg,disp,8)
+#define x64_push_memindex(inst,basereg,disp,indexreg,shift) x64_push_memindex_size(inst,basereg,disp,indexreg,shift,8)
+#define x64_push_imm(inst,imm) x64_push_imm_size(inst,imm,8)
+//#define x64_pop_reg(inst,reg) x64_pop_reg_size(inst,reg,8)
+#define x64_pop_mem(inst,mem) x64_pop_mem_size(inst,mem,8)
+#define x64_pop_membase(inst,basereg,disp) x64_pop_membase_size(inst,basereg,disp,8)
+#define x64_pushad(inst) x64_pushad_size(inst,8)
+#define x64_pushfd(inst) x64_pushfd_size(inst,8)
+#define x64_popad(inst) x64_popad_size(inst,8)
+#define x64_popfd(inst) x64_popfd_size(inst,8)
+#define x64_loop(inst,imm) x64_loop_size(inst,imm,8)
+#define x64_loope(inst,imm) x64_loope_size(inst,imm,8)
+#define x64_loopne(inst,imm) x64_loopne_size(inst,imm,8)
+#define x64_jump32(inst,imm) x64_jump32_size(inst,imm,8)
+#define x64_jump8(inst,imm) x64_jump8_size(inst,imm,8)
+#define x64_jump_reg(inst,reg) x64_jump_reg_size(inst,reg,8)
+#define x64_jump_mem(inst,mem) x64_jump_mem_size(inst,mem,8)
+#define x64_jump_membase(inst,basereg,disp) x64_jump_membase_size(inst,basereg,disp,8)
+#define x64_jump_code(inst,target) x64_jump_code_size(inst,target,8)
+#define x64_jump_disp(inst,disp) x64_jump_disp_size(inst,disp,8)
+#define x64_branch8(inst,cond,imm,is_signed) x64_branch8_size(inst,cond,imm,is_signed,8)
+#define x64_branch32(inst,cond,imm,is_signed) x64_branch32_size(inst,cond,imm,is_signed,8)
+#define x64_branch(inst,cond,target,is_signed) x64_branch_size(inst,cond,target,is_signed,8)
+#define x64_branch_disp(inst,cond,disp,is_signed) x64_branch_disp_size(inst,cond,disp,is_signed,8)
+#define x64_set_reg(inst,cond,reg,is_signed) x64_set_reg_size(inst,cond,reg,is_signed,8)
+#define x64_set_mem(inst,cond,mem,is_signed) x64_set_mem_size(inst,cond,mem,is_signed,8)
+#define x64_set_membase(inst,cond,basereg,disp,is_signed) x64_set_membase_size(inst,cond,basereg,disp,is_signed,8)
+#define x64_call_imm(inst,disp) x64_call_imm_size(inst,disp,8)
+//#define x64_call_reg(inst,reg) x64_call_reg_size(inst,reg,8)
+#define x64_call_mem(inst,mem) x64_call_mem_size(inst,mem,8)
+#define x64_call_membase(inst,basereg,disp) x64_call_membase_size(inst,basereg,disp,8)
+#define x64_call_code(inst,target) x64_call_code_size(inst,target,8)
+//#define x64_ret(inst) x64_ret_size(inst,8)
+#define x64_ret_imm(inst,imm) x64_ret_imm_size(inst,imm,8)
+#define x64_cmov_reg(inst,cond,is_signed,dreg,reg) x64_cmov_reg_size(inst,cond,is_signed,dreg,reg,8)
+#define x64_cmov_mem(inst,cond,is_signed,reg,mem) x64_cmov_mem_size(inst,cond,is_signed,reg,mem,8)
+#define x64_cmov_membase(inst,cond,is_signed,reg,basereg,disp) x64_cmov_membase_size(inst,cond,is_signed,reg,basereg,disp,8)
+#define x64_enter(inst,framesize) x64_enter_size(inst,framesize,8)
+//#define x64_leave(inst) x64_leave_size(inst,8)
+#define x64_sahf(inst) x64_sahf_size(inst,8)
+#define x64_fsin(inst) x64_fsin_size(inst,8)
+#define x64_fcos(inst) x64_fcos_size(inst,8)
+#define x64_fabs(inst) x64_fabs_size(inst,8)
+#define x64_ftst(inst) x64_ftst_size(inst,8)
+#define x64_fxam(inst) x64_fxam_size(inst,8)
+#define x64_fpatan(inst) x64_fpatan_size(inst,8)
+#define x64_fprem(inst) x64_fprem_size(inst,8)
+#define x64_fprem1(inst) x64_fprem1_size(inst,8)
+#define x64_frndint(inst) x64_frndint_size(inst,8)
+#define x64_fsqrt(inst) x64_fsqrt_size(inst,8)
+#define x64_fptan(inst) x64_fptan_size(inst,8)
+#define x64_padding(inst,size) x64_padding_size(inst,size)
+#define x64_prolog(inst,frame,reg_mask) x64_prolog_size(inst,frame,reg_mask,8)
+#define x64_epilog(inst,reg_mask) x64_epilog_size(inst,reg_mask,8)
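+
+/* The wrappers above just default the operand size to 8 bytes. For
+ * example (assuming the register enum, X64_RAX == 0, defined earlier
+ * in this header):
+ *
+ *   x64_neg_reg (code, X64_RAX);           // expands to
+ *   x64_neg_reg_size (code, X64_RAX, 8);   // 48 f7 d8 -> neg rax
+ */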
+
+#endif // X64_H
diff --git a/src/arch/x86/.gitignore b/src/arch/x86/.gitignore
new file mode 100644
index 0000000..341daec
--- /dev/null
+++ b/src/arch/x86/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
diff --git a/src/arch/x86/Makefile.am b/src/arch/x86/Makefile.am
new file mode 100644
index 0000000..bab0f9e
--- /dev/null
+++ b/src/arch/x86/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = x86-codegen.h
\ No newline at end of file
diff --git a/src/arch/x86/x86-codegen.h b/src/arch/x86/x86-codegen.h
new file mode 100644
index 0000000..0052076
--- /dev/null
+++ b/src/arch/x86/x86-codegen.h
@@ -0,0 +1,2647 @@
+/*
+ * x86-codegen.h: Macros for generating x86 code
+ *
+ * Authors:
+ * Paolo Molaro (lupus@ximian.com)
+ * Intel Corporation (ORP Project)
+ * Sergey Chaban (serge@wildwestsoftware.com)
+ * Dietmar Maurer (dietmar@ximian.com)
+ * Patrik Torstensson
+ *
+ * Copyright (C) 2000 Intel Corporation. All rights reserved.
+ * Copyright (C) 2001, 2002 Ximian, Inc.
+ */
+
+#ifndef X86_H
+#define X86_H
+
+#include <assert.h>
+
+#ifdef __native_client_codegen__
+extern gint8 nacl_align_byte;
+#endif /* __native_client_codegen__ */
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do { mono_nacl_align_inst(inst_ptr_ptr, inst_len); } while (0)
+
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) \
+ (mono_nacl_align_call(&_code_start, &(inst)), _code_start);
+#define x86_call_sequence_pre(inst) x86_call_sequence_pre_val((inst))
+#define x86_call_sequence_post(inst) x86_call_sequence_post_val((inst))
+#else
+#define x86_codegen_pre(inst_ptr_ptr, inst_len) do {} while (0)
+
+/* Two variants are needed to avoid warnings */
+#define x86_call_sequence_pre_val(inst) guint8* _code_start = (inst);
+#define x86_call_sequence_post_val(inst) _code_start
+#define x86_call_sequence_pre(inst)
+#define x86_call_sequence_post(inst)
+#endif /* __native_client_codegen__ */
+
+/* x86 32bit register numbers */
+typedef enum {
+ X86_EAX = 0,
+ X86_ECX = 1,
+ X86_EDX = 2,
+ X86_EBX = 3,
+ X86_ESP = 4,
+ X86_EBP = 5,
+ X86_ESI = 6,
+ X86_EDI = 7,
+ X86_NREG
+} X86_Reg_No;
+
+typedef enum {
+ X86_XMM0,
+ X86_XMM1,
+ X86_XMM2,
+ X86_XMM3,
+ X86_XMM4,
+ X86_XMM5,
+ X86_XMM6,
+ X86_XMM7,
+ X86_XMM_NREG
+} X86_XMM_Reg_No;
+
+/* opcodes for ALU instructions */
+typedef enum {
+ X86_ADD = 0,
+ X86_OR = 1,
+ X86_ADC = 2,
+ X86_SBB = 3,
+ X86_AND = 4,
+ X86_SUB = 5,
+ X86_XOR = 6,
+ X86_CMP = 7,
+ X86_NALU
+} X86_ALU_Opcode;
+/*
+// opcodes for shift instructions
+*/
+typedef enum {
+ X86_SHLD,
+ X86_SHLR,
+ X86_ROL = 0,
+ X86_ROR = 1,
+ X86_RCL = 2,
+ X86_RCR = 3,
+ X86_SHL = 4,
+ X86_SHR = 5,
+ X86_SAR = 7,
+ X86_NSHIFT = 8
+} X86_Shift_Opcode;
+/*
+// opcodes for floating-point instructions
+*/
+typedef enum {
+ X86_FADD = 0,
+ X86_FMUL = 1,
+ X86_FCOM = 2,
+ X86_FCOMP = 3,
+ X86_FSUB = 4,
+ X86_FSUBR = 5,
+ X86_FDIV = 6,
+ X86_FDIVR = 7,
+ X86_NFP = 8
+} X86_FP_Opcode;
+/*
+// integer conditions codes
+*/
+typedef enum {
+ X86_CC_EQ = 0, X86_CC_E = 0, X86_CC_Z = 0,
+ X86_CC_NE = 1, X86_CC_NZ = 1,
+ X86_CC_LT = 2, X86_CC_B = 2, X86_CC_C = 2, X86_CC_NAE = 2,
+ X86_CC_LE = 3, X86_CC_BE = 3, X86_CC_NA = 3,
+ X86_CC_GT = 4, X86_CC_A = 4, X86_CC_NBE = 4,
+ X86_CC_GE = 5, X86_CC_AE = 5, X86_CC_NB = 5, X86_CC_NC = 5,
+ X86_CC_LZ = 6, X86_CC_S = 6,
+ X86_CC_GEZ = 7, X86_CC_NS = 7,
+ X86_CC_P = 8, X86_CC_PE = 8,
+ X86_CC_NP = 9, X86_CC_PO = 9,
+ X86_CC_O = 10,
+ X86_CC_NO = 11,
+ X86_NCC
+} X86_CC;
+
+/* FP status */
+enum {
+ X86_FP_C0 = 0x100,
+ X86_FP_C1 = 0x200,
+ X86_FP_C2 = 0x400,
+ X86_FP_C3 = 0x4000,
+ X86_FP_CC_MASK = 0x4500
+};
+
+/* FP control word */
+enum {
+ X86_FPCW_INVOPEX_MASK = 0x1,
+ X86_FPCW_DENOPEX_MASK = 0x2,
+ X86_FPCW_ZERODIV_MASK = 0x4,
+ X86_FPCW_OVFEX_MASK = 0x8,
+ X86_FPCW_UNDFEX_MASK = 0x10,
+ X86_FPCW_PRECEX_MASK = 0x20,
+ X86_FPCW_PRECC_MASK = 0x300,
+ X86_FPCW_ROUNDC_MASK = 0xc00,
+
+ /* values for precision control */
+ X86_FPCW_PREC_SINGLE = 0,
+ X86_FPCW_PREC_DOUBLE = 0x200,
+ X86_FPCW_PREC_EXTENDED = 0x300,
+
+ /* values for rounding control */
+ X86_FPCW_ROUND_NEAREST = 0,
+ X86_FPCW_ROUND_DOWN = 0x400,
+ X86_FPCW_ROUND_UP = 0x800,
+ X86_FPCW_ROUND_TOZERO = 0xc00
+};
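+
+/* Illustrative sketch: these masks are normally used by spilling the
+ * control word, tweaking the rounding bits and reloading it
+ * (x86_fnstcw_membase/x86_fldcw_membase are defined later in this
+ * header; the read-modify-write of the spilled word is elided):
+ *
+ *   x86_fnstcw_membase (code, X86_ESP, 0);  // store current FPCW
+ *   ...OR X86_FPCW_ROUND_TOZERO into the word at [esp]...
+ *   x86_fldcw_membase (code, X86_ESP, 0);   // reload modified FPCW
+ */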
+
+/*
+// prefix code
+*/
+typedef enum {
+ X86_LOCK_PREFIX = 0xF0,
+ X86_REPNZ_PREFIX = 0xF2,
+ X86_REPZ_PREFIX = 0xF3,
+ X86_REP_PREFIX = 0xF3,
+ X86_CS_PREFIX = 0x2E,
+ X86_SS_PREFIX = 0x36,
+ X86_DS_PREFIX = 0x3E,
+ X86_ES_PREFIX = 0x26,
+ X86_FS_PREFIX = 0x64,
+ X86_GS_PREFIX = 0x65,
+ X86_UNLIKELY_PREFIX = 0x2E,
+ X86_LIKELY_PREFIX = 0x3E,
+ X86_OPERAND_PREFIX = 0x66,
+ X86_ADDRESS_PREFIX = 0x67
+} X86_Prefix;
+
+static const unsigned char
+x86_cc_unsigned_map [X86_NCC] = {
+ 0x74, /* eq */
+ 0x75, /* ne */
+ 0x72, /* lt */
+ 0x76, /* le */
+ 0x77, /* gt */
+ 0x73, /* ge */
+ 0x78, /* lz */
+ 0x79, /* gez */
+ 0x7a, /* p */
+ 0x7b, /* np */
+ 0x70, /* o */
+ 0x71, /* no */
+};
+
+static const unsigned char
+x86_cc_signed_map [X86_NCC] = {
+ 0x74, /* eq */
+ 0x75, /* ne */
+ 0x7c, /* lt */
+ 0x7e, /* le */
+ 0x7f, /* gt */
+ 0x7d, /* ge */
+ 0x78, /* lz */
+ 0x79, /* gez */
+ 0x7a, /* p */
+ 0x7b, /* np */
+ 0x70, /* o */
+ 0x71, /* no */
+};
+
+typedef union {
+ int val;
+ unsigned char b [4];
+} x86_imm_buf;
+
+#define X86_NOBASEREG (-1)
+
+/*
+// bitvector mask for callee-saved registers
+*/
+#define X86_ESI_MASK (1<<X86_ESI)
+#define X86_EDI_MASK (1<<X86_EDI)
+#define X86_EBX_MASK (1<<X86_EBX)
+#define X86_EBP_MASK (1<<X86_EBP)
+
+#define X86_CALLEE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX))
+#define X86_CALLER_REGS ((1<<X86_EBX) | (1<<X86_EBP) | (1<<X86_ESI) | (1<<X86_EDI))
+#define X86_BYTE_REGS ((1<<X86_EAX) | (1<<X86_ECX) | (1<<X86_EDX) | (1<<X86_EBX))
+
+#define X86_IS_SCRATCH(reg) (X86_CALLEE_REGS & (1 << (reg))) /* X86_EAX, X86_ECX, or X86_EDX */
+#define X86_IS_CALLEE(reg) (X86_CALLER_REGS & (1 << (reg))) /* X86_ESI, X86_EDI, X86_EBX, or X86_EBP */
+
+#define X86_IS_BYTE_REG(reg) ((reg) < 4)
+
+/*
+// Frame structure:
+//
+// +--------------------------------+
+// | in_arg[0] = var[0] |
+// | in_arg[1] = var[1] |
+// | . . . |
+// | in_arg[n_arg-1] = var[n_arg-1] |
+// +--------------------------------+
+// | return IP |
+// +--------------------------------+
+// | saved EBP | <-- frame pointer (EBP)
+// +--------------------------------+
+// | ... | n_extra
+// +--------------------------------+
+// | var[n_arg] |
+// | var[n_arg+1] | local variables area
+// | . . . |
+// | var[n_var-1] |
+// +--------------------------------+
+// | |
+// | |
+// | spill area | area for spilling mimic stack
+// | |
+// +--------------------------------+
+// | ebx |
+// | ebp [ESP_Frame only] |
+// | esi | 0..3 callee-saved regs
+// | edi | <-- stack pointer (ESP)
+// +--------------------------------+
+// | stk0 |
+// | stk1 | operand stack area/
+// | . . . | out args
+// | stkn-1 |
+// +--------------------------------+
+//
+//
+*/
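+
+/* Illustrative sketch of the prologue that sets up the frame above
+ * (the push/mov/alu macros are defined later in this header;
+ * local_size stands for the locals + spill area):
+ *
+ *   x86_push_reg (code, X86_EBP);                          // save caller EBP
+ *   x86_mov_reg_reg (code, X86_EBP, X86_ESP, 4);           // ebp = esp
+ *   x86_alu_reg_imm (code, X86_SUB, X86_ESP, local_size);  // reserve locals
+ */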
+
+
+/*
+ * useful building blocks
+ */
+#define x86_modrm_mod(modrm) ((modrm) >> 6)
+#define x86_modrm_reg(modrm) (((modrm) >> 3) & 0x7)
+#define x86_modrm_rm(modrm) ((modrm) & 0x7)
+
+#define x86_address_byte(inst,m,o,r) do { *(inst)++ = ((((m)&0x03)<<6)|(((o)&0x07)<<3)|(((r)&0x07))); } while (0)
+#define x86_imm_emit32(inst,imm) \
+ do { \
+ x86_imm_buf imb; imb.val = (int) (imm); \
+ *(inst)++ = imb.b [0]; \
+ *(inst)++ = imb.b [1]; \
+ *(inst)++ = imb.b [2]; \
+ *(inst)++ = imb.b [3]; \
+ } while (0)
+#define x86_imm_emit16(inst,imm) do { *(short*)(inst) = (imm); (inst) += 2; } while (0)
+#define x86_imm_emit8(inst,imm) do { *(inst) = (unsigned char)((imm) & 0xff); ++(inst); } while (0)
+#define x86_is_imm8(imm) (((int)(imm) >= -128 && (int)(imm) <= 127))
+#define x86_is_imm16(imm) (((int)(imm) >= -(1<<16) && (int)(imm) <= ((1<<16)-1)))
+
+#define x86_reg_emit(inst,r,regno) do { x86_address_byte ((inst), 3, (r), (regno)); } while (0)
+#define x86_reg8_emit(inst,r,regno,is_rh,is_rnoh) do {x86_address_byte ((inst), 3, (is_rh)?((r)|4):(r), (is_rnoh)?((regno)|4):(regno));} while (0)
+#define x86_regp_emit(inst,r,regno) do { x86_address_byte ((inst), 0, (r), (regno)); } while (0)
+#define x86_mem_emit(inst,r,disp) do { x86_address_byte ((inst), 0, (r), 5); x86_imm_emit32((inst), (disp)); } while (0)
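+
+/* For example, x86_reg_emit (inst, X86_ECX, X86_EDX) emits the single
+ * ModRM byte (3<<6)|(1<<3)|2 = 0xca: mod=11 (register-direct),
+ * reg=ECX, rm=EDX -- the ModRM byte of "add ecx, edx" (03 ca). */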
+
+#define kMaxMembaseEmitPadding 6
+
+#define x86_membase_emit_body(inst,r,basereg,disp) do {\
+ if ((basereg) == X86_ESP) { \
+ if ((disp) == 0) { \
+ x86_address_byte ((inst), 0, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ } else if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), X86_ESP); \
+ x86_address_byte ((inst), 0, X86_ESP, X86_ESP); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ break; \
+ } \
+ if ((disp) == 0 && (basereg) != X86_EBP) { \
+ x86_address_byte ((inst), 0, (r), (basereg)); \
+ break; \
+ } \
+ if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), (basereg)); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ } while (0)
+
+#if defined(__native_client_codegen__) && defined(TARGET_AMD64)
+#define x86_membase_emit(inst,r,basereg,disp) \
+ do { \
+ x64_nacl_membase_handler(&(inst), (basereg), (disp), (r)) ; \
+ } while (0)
+#else /* __default_codegen__ || 32-bit NaCl codegen */
+#define x86_membase_emit(inst,r,basereg,disp) \
+ do { \
+ x86_membase_emit_body((inst),(r),(basereg),(disp)); \
+ } while (0)
+#endif
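+
+/* Examples of the encodings chosen above, with r = 0 (the /0 opcode
+ * extension used by "inc dword ptr [...]", opcode ff):
+ *
+ *   x86_membase_emit (code, 0, X86_EAX, 0);  // 00       [eax]
+ *   x86_membase_emit (code, 0, X86_EBP, 8);  // 45 08    [ebp+8], imm8 disp
+ *   x86_membase_emit (code, 0, X86_ESP, 0);  // 04 24    [esp] needs a SIB
+ */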
+
+#define kMaxMemindexEmitPadding 6
+
+#define x86_memindex_emit(inst,r,basereg,disp,indexreg,shift) \
+ do { \
+ if ((basereg) == X86_NOBASEREG) { \
+ x86_address_byte ((inst), 0, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), 5); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } else if ((disp) == 0 && (basereg) != X86_EBP) { \
+ x86_address_byte ((inst), 0, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ } else if (x86_is_imm8((disp))) { \
+ x86_address_byte ((inst), 1, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ x86_imm_emit8 ((inst), (disp)); \
+ } else { \
+ x86_address_byte ((inst), 2, (r), 4); \
+ x86_address_byte ((inst), (shift), (indexreg), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ } \
+ } while (0)
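+
+/* Example: x86_memindex_emit (code, 0, X86_EAX, 0, X86_ECX, 2) encodes
+ * [eax + ecx*4] as ModRM 04 plus SIB 88 (scale=2, index=ECX, base=EAX),
+ * so "inc dword [eax+ecx*4]" comes out as ff 04 88. */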
+
+/*
+ * target is the position in the code to jump to:
+ * target = code;
+ * .. output loop code...
+ * x86_mov_reg_imm (code, X86_EAX, 0);
+ * loop = code;
+ * x86_loop (code, -1);
+ * ... finish method
+ *
+ * patch displacement
+ * x86_patch (loop, target);
+ *
+ * ins should point at the start of the instruction that encodes a target.
+ * The instruction is inspected for validity and the correct displacement
+ * is inserted.
+ */
+#define x86_do_patch(ins,target) \
+ do { \
+ unsigned char* pos = (ins) + 1; \
+ int disp, size = 0; \
+ switch (*(unsigned char*)(ins)) { \
+ case 0xe8: case 0xe9: ++size; break; /* call, jump32 */ \
+ case 0x0f: if (!(*pos >= 0x70 && *pos <= 0x8f)) assert (0); \
+ ++size; ++pos; break; /* prefix for 32-bit disp */ \
+ case 0xe0: case 0xe1: case 0xe2: /* loop */ \
+ case 0xeb: /* jump8 */ \
+ /* conditional jump opcodes */ \
+ case 0x70: case 0x71: case 0x72: case 0x73: \
+ case 0x74: case 0x75: case 0x76: case 0x77: \
+ case 0x78: case 0x79: case 0x7a: case 0x7b: \
+ case 0x7c: case 0x7d: case 0x7e: case 0x7f: \
+ break; \
+ default: assert (0); \
+ } \
+ disp = (target) - pos; \
+ if (size) x86_imm_emit32 (pos, disp - 4); \
+ else if (x86_is_imm8 (disp - 1)) x86_imm_emit8 (pos, disp - 1); \
+ else assert (0); \
+ } while (0)
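+
+/* Typical forward-branch pattern built on the patcher (x86_jump32 is
+ * defined later in this header):
+ *
+ *   unsigned char *jump = code;
+ *   x86_jump32 (code, 0);      // placeholder displacement
+ *   ...emit the code being skipped...
+ *   x86_patch (jump, code);    // retarget the jump to land here
+ */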
+
+#if defined( __native_client_codegen__ ) && defined(TARGET_X86)
+
+#define x86_skip_nops(inst) \
+ do { \
+ int in_nop = 0; \
+ do { \
+ in_nop = 0; \
+ if (inst[0] == 0x90) { \
+ in_nop = 1; \
+ inst += 1; \
+ } \
+ if (inst[0] == 0x8b && inst[1] == 0xc0) { \
+ in_nop = 1; \
+ inst += 2; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0x6d \
+ && inst[2] == 0x00) { \
+ in_nop = 1; \
+ inst += 3; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0x64 \
+ && inst[2] == 0x24 && inst[3] == 0x00) { \
+ in_nop = 1; \
+ inst += 4; \
+ } \
+ /* skip inst+=5 case because it's the 4-byte + 1-byte case */ \
+ if (inst[0] == 0x8d && inst[1] == 0xad \
+ && inst[2] == 0x00 && inst[3] == 0x00 \
+ && inst[4] == 0x00 && inst[5] == 0x00) { \
+ in_nop = 1; \
+ inst += 6; \
+ } \
+ if (inst[0] == 0x8d && inst[1] == 0xa4 \
+ && inst[2] == 0x24 && inst[3] == 0x00 \
+ && inst[4] == 0x00 && inst[5] == 0x00 \
+ && inst[6] == 0x00 ) { \
+ in_nop = 1; \
+ inst += 7; \
+ } \
+ } while ( in_nop ); \
+ } while (0)
+
+#if defined(__native_client__)
+#define x86_patch(ins,target) \
+ do { \
+ unsigned char* inst = (ins); \
+ guint8* new_target = nacl_modify_patch_target((target)); \
+ x86_skip_nops((inst)); \
+ x86_do_patch((inst), new_target); \
+ } while (0)
+#else /* __native_client__ */
+#define x86_patch(ins,target) \
+ do { \
+ unsigned char* inst = (ins); \
+ guint8* new_target = (target); \
+ x86_skip_nops((inst)); \
+ x86_do_patch((inst), new_target); \
+ } while (0)
+#endif /* __native_client__ */
+
+#else
+#define x86_patch(ins,target) do { x86_do_patch((ins), (target)); } while (0)
+#endif /* __native_client_codegen__ */
+
+#ifdef __native_client_codegen__
+/* The breakpoint instruction is illegal in Native Client, although the HALT */
+/* instruction is allowed. The breakpoint is used in several places in */
+/* mini-x86.c and exceptions-x86.c. */
+#define x86_breakpoint(inst) \
+ do { \
+ *(inst)++ = 0xf4; \
+ } while (0)
+#else
+#define x86_breakpoint(inst) \
+ do { \
+ *(inst)++ = 0xcc; \
+ } while (0)
+#endif
+
+#define x86_cld(inst) do { *(inst)++ =(unsigned char)0xfc; } while (0)
+#define x86_stosb(inst) do { *(inst)++ =(unsigned char)0xaa; } while (0)
+#define x86_stosl(inst) do { *(inst)++ =(unsigned char)0xab; } while (0)
+#define x86_stosd(inst) x86_stosl((inst))
+#define x86_movsb(inst) do { *(inst)++ =(unsigned char)0xa4; } while (0)
+#define x86_movsl(inst) do { *(inst)++ =(unsigned char)0xa5; } while (0)
+#define x86_movsd(inst) x86_movsl((inst))
+
+#if defined(__native_client_codegen__)
+#if defined(TARGET_X86)
+/* kNaClAlignment - 1 is the max value we can pass into x86_codegen_pre. */
+/* This keeps us from having to call x86_codegen_pre with specific */
+/* knowledge of the size of the instruction that follows it, and */
+/* localizes the alignment requirement to this spot. */
+#define x86_prefix(inst,p) \
+ do { \
+ x86_codegen_pre(&(inst), kNaClAlignment - 1); \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* We need to tag any prefixes so we can perform proper membase sandboxing */
+/* See: mini-amd64.c:amd64_nacl_membase_handler for verbose details */
+#define x86_prefix(inst,p) \
+ do { \
+ x64_nacl_tag_legacy_prefix((inst)); \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+
+#endif /* TARGET_AMD64 */
+
+#else
+#define x86_prefix(inst,p) \
+ do { \
+ *(inst)++ =(unsigned char) (p); \
+ } while (0)
+#endif /* __native_client_codegen__ */
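+
+/* Typical use: a LOCK prefix in front of a read-modify-write
+ * instruction to make it atomic, e.g. with the cmpxchg macro defined
+ * just below:
+ *
+ *   x86_prefix (code, X86_LOCK_PREFIX);
+ *   x86_cmpxchg_membase_reg (code, X86_EBX, 0, X86_ECX);  // lock cmpxchg [ebx], ecx
+ */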
+
+#define x86_rdtsc(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = 0x0f; \
+ *(inst)++ = 0x31; \
+ } while (0)
+
+#define x86_cmpxchg_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_cmpxchg_mem_reg(inst,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_cmpxchg_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xb1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_xchg_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_xchg_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_xchg_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0x86; \
+ else \
+ *(inst)++ = (unsigned char)0x87; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_xadd_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_xadd_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_xadd_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0F; \
+ if ((size) == 1) \
+ *(inst)++ = (unsigned char)0xC0; \
+ else \
+ *(inst)++ = (unsigned char)0xC1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_inc_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_inc_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_inc_reg(inst,reg) do { *(inst)++ = (unsigned char)0x40 + (reg); } while (0)
+
+#define x86_dec_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 1, (mem)); \
+ } while (0)
+
+#define x86_dec_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 1, (basereg), (disp)); \
+ } while (0)
+
+#define x86_dec_reg(inst,reg) do { *(inst)++ = (unsigned char)0x48 + (reg); } while (0)
+
+#define x86_not_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 2, (mem)); \
+ } while (0)
+
+#define x86_not_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } while (0)
+
+#define x86_not_reg(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 2, (reg)); \
+ } while (0)
+
+#define x86_neg_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 3, (mem)); \
+ } while (0)
+
+#define x86_neg_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 3, (basereg), (disp)); \
+ } while (0)
+
+#define x86_neg_reg(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 3, (reg)); \
+ } while (0)
+
+#define x86_nop(inst) do { *(inst)++ = (unsigned char)0x90; } while (0)
+
+#define x86_alu_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((reg) == X86_EAX) { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 5; \
+ x86_imm_emit32 ((inst), (imm)); \
+ break; \
+ } \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
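+
+/*
+ * Encoding sketch (editor's illustration, hypothetical cursor `code`,
+ * assuming the X86_ALU group numbers defined earlier in this header,
+ * e.g. X86_ADD = 0): small immediates take the sign-extended imm8 form,
+ * and EAX has a short dedicated form.
+ *
+ *   x86_alu_reg_imm (code, X86_ADD, X86_ECX, 1);          // 83 C1 01
+ *   x86_alu_reg_imm (code, X86_ADD, X86_EAX, 0x12345678); // 05 78 56 34 12
+ */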
+
+#define x86_alu_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x83; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x81; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_alu_membase8_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x80; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_alu_mem_reg(inst,opc,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 1; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+/**
+ * @x86_alu_reg8_reg8:
+ * Supports ALU operations between two 8-bit registers.
+ * dreg := dreg opc reg
+ * Registers are specified with the X86_Reg_No enum.
+ * Additionally, the is_*_h flags select which byte of the given
+ * 32-bit register is used: the high byte (TRUE) or the low byte (FALSE).
+ * For example: dreg = X86_EAX, is_dreg_h = TRUE -> use AH
+ */
+#define x86_alu_reg8_reg8(inst,opc,dreg,reg,is_dreg_h,is_reg_h) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 2; \
+ x86_reg8_emit ((inst), (dreg), (reg), (is_dreg_h), (is_reg_h)); \
+ } while (0)
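+
+/*
+ * Usage sketch (editor's illustration, hypothetical cursor `code`):
+ * AH := AH + BL with opc = X86_ADD encodes as 02 E3 ("add ah, bl").
+ *
+ *   x86_alu_reg8_reg8 (code, X86_ADD, X86_EAX, X86_EBX, TRUE, FALSE);
+ */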
+
+#define x86_alu_reg_mem(inst,opc,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_alu_reg_membase(inst,opc,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (((unsigned char)(opc)) << 3) + 3; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_test_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((reg) == X86_EAX) { \
+ *(inst)++ = (unsigned char)0xa9; \
+ } else { \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_mem_imm8(inst,mem,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xf6; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_mem_imm(inst,mem,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_membase_imm(inst,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_test_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_test_mem_reg(inst,mem,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_test_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x85; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_shift_reg_imm(inst,opc,reg,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
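+
+/*
+ * Encoding sketch (editor's illustration): `opc` is the hardware shift
+ * group placed in the ModRM reg field (e.g. X86_SHL = 4, X86_SHR = 5,
+ * X86_SAR = 7), and a count of 1 gets the shorter D1 form.
+ *
+ *   x86_shift_reg_imm (code, X86_SHL, X86_EAX, 1); // D1 E0
+ *   x86_shift_reg_imm (code, X86_SHL, X86_EAX, 4); // C1 E0 04
+ */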
+
+#define x86_shift_mem_imm(inst,opc,mem,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } else { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_membase_imm(inst,opc,basereg,disp,imm) \
+ do { \
+ if ((imm) == 1) { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } else { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc1; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_shift_reg(inst,opc,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_reg_emit ((inst), (opc), (reg)); \
+ } while (0)
+
+#define x86_shift_mem(inst,opc,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_shift_membase(inst,opc,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd3; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * Multi op shift missing.
+ */
+
+#define x86_shrd_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xad; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shrd_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xac; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+#define x86_shld_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa5; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ } while (0)
+
+#define x86_shld_reg_imm(inst,dreg,reg,shamt) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xa4; \
+ x86_reg_emit ((inst), (reg), (dreg)); \
+ x86_imm_emit8 ((inst), (shamt)); \
+ } while (0)
+
+/*
+ * EDX:EAX = EAX * rm
+ */
+#define x86_mul_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 4 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
+
+#define x86_mul_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 4 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_mul_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 4 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
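+
+/*
+ * Usage sketch (editor's illustration): a widening unsigned multiply of
+ * EAX by ECX, leaving the 64-bit product in EDX:EAX (F7 E1, "mul ecx").
+ *
+ *   x86_mul_reg (code, X86_ECX, FALSE);
+ */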
+
+/*
+ * r *= rm
+ */
+#define x86_imul_reg_reg(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_imul_reg_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_imul_reg_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0xaf; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+/*
+ * dreg = rm * imm
+ */
+#define x86_imul_reg_reg_imm(inst,dreg,reg,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_imul_reg_mem_imm(inst,reg,mem,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_imul_reg_membase_imm(inst,reg,basereg,disp,imm) \
+ do { \
+ if (x86_is_imm8 ((imm))) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x6b; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x69; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
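+
+/*
+ * Encoding sketch (editor's illustration): the imm8 form (6B) is chosen
+ * whenever the immediate fits, otherwise the imm32 form (69) is used.
+ *
+ *   x86_imul_reg_reg_imm (code, X86_EAX, X86_EAX, 10);   // 6B C0 0A
+ *   x86_imul_reg_reg_imm (code, X86_EAX, X86_EAX, 1000); // 69 C0 E8 03 00 00
+ */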
+
+/*
+ * divide EDX:EAX by rm;
+ * eax = quotient, edx = remainder
+ */
+
+#define x86_div_reg(inst,reg,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_reg_emit ((inst), 6 + ((is_signed) ? 1 : 0), (reg)); \
+ } while (0)
+
+#define x86_div_mem(inst,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_mem_emit ((inst), 6 + ((is_signed) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_div_membase(inst,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf7; \
+ x86_membase_emit ((inst), 6 + ((is_signed) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
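+
+/*
+ * Usage sketch (editor's illustration): a full signed division of EAX by
+ * ECX. EDX:EAX must hold the sign-extended dividend first, so x86_cdq
+ * (defined below) precedes the idiv; the pair encodes as 99 F7 F9.
+ *
+ *   x86_cdq (code);
+ *   x86_div_reg (code, X86_ECX, TRUE);
+ */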
+
+#define x86_mov_mem_reg(inst,mem,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
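+
+/*
+ * Editor's note: in the size switches of these mov macros, the 16-bit
+ * case deliberately falls through: it emits the 0x66 operand-size prefix
+ * and then reuses the 32-bit opcode byte, so a 16-bit store encodes as
+ * 66 89 /r and a 16-bit load as 66 8B /r.
+ */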
+
+#define x86_mov_regp_reg(inst,regp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_regp_emit ((inst), (reg), (regp)); \
+ } while (0)
+
+#define x86_mov_membase_reg(inst,basereg,disp,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_memindex_reg(inst,basereg,disp,indexreg,shift,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x88; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x89; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_mov_reg_reg(inst,dreg,reg,size) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_mov_reg_mem(inst,reg,mem,size) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define kMovRegMembasePadding (2 + kMaxMembaseEmitPadding)
+
+#define x86_mov_reg_membase(inst,reg,basereg,disp,size) \
+ do { \
+ x86_codegen_pre(&(inst), kMovRegMembasePadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_mov_reg_memindex(inst,reg,basereg,disp,indexreg,shift,size) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ switch ((size)) { \
+ case 1: *(inst)++ = (unsigned char)0x8a; break; \
+ case 2: x86_prefix((inst), X86_OPERAND_PREFIX); /* fall through */ \
+ case 4: *(inst)++ = (unsigned char)0x8b; break; \
+ default: assert (0); \
+ } \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+/*
+ * Note: x86_clear_reg () changes the condition codes!
+ */
+#define x86_clear_reg(inst,reg) x86_alu_reg_reg((inst), X86_XOR, (reg), (reg))
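+
+/*
+ * Editor's sketch: the xor form emitted above (33 /r, e.g. 33 C0 for EAX)
+ * is only 2 bytes; when the flags must survive, emit the 5-byte
+ * "mov reg, 0" instead.
+ *
+ *   x86_mov_reg_imm (code, X86_EAX, 0); // B8 00 00 00 00, flags untouched
+ */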
+
+#define x86_mov_reg_imm(inst,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xb8 + (reg); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_mov_mem_imm(inst,mem,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 9); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 10); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_membase_imm(inst,basereg,disp,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_mov_memindex_imm(inst,basereg,disp,indexreg,shift,imm,size) \
+ do { \
+ if ((size) == 1) { \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc6; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } else if ((size) == 2) { \
+ x86_codegen_pre(&(inst), 4 + kMaxMemindexEmitPadding); \
+ x86_prefix((inst), X86_OPERAND_PREFIX); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit16 ((inst), (imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xc7; \
+ x86_memindex_emit ((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+ x86_imm_emit32 ((inst), (imm)); \
+ } \
+ } while (0)
+
+#define x86_lea_mem(inst,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_lea_membase(inst,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_lea_memindex(inst,reg,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x8d; \
+ x86_memindex_emit ((inst), (reg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_widen_reg(inst,dreg,reg,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ g_assert (is_half || X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_widen_mem(inst,dreg,mem,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_mem_emit ((inst), (dreg), (mem)); \
+ } while (0)
+
+#define x86_widen_membase(inst,dreg,basereg,disp,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_widen_memindex(inst,dreg,basereg,disp,indexreg,shift,is_signed,is_half) \
+ do { \
+ unsigned char op = 0xb6; \
+ x86_codegen_pre(&(inst), 2 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) op += 0x08; \
+ if ((is_half)) op += 0x01; \
+ *(inst)++ = op; \
+ x86_memindex_emit ((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
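+
+/*
+ * Encoding sketch (editor's illustration): the second opcode byte starts
+ * from 0F B6 (movzx r32, r/m8) and adds 0x08 for signed and 0x01 for
+ * 16-bit sources, covering 0F B6/B7/BE/BF.
+ *
+ *   x86_widen_reg (code, X86_EAX, X86_ECX, TRUE, FALSE); // 0F BE C1, movsx eax, cl
+ */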
+
+#define x86_cdq(inst) do { *(inst)++ = (unsigned char)0x99; } while (0)
+#define x86_wait(inst) do { *(inst)++ = (unsigned char)0x9b; } while (0)
+
+#define x86_fp_op_mem(inst,opc,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_mem_emit ((inst), (opc), (mem)); \
+ } while (0)
+
+#define x86_fp_op_membase(inst,opc,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdc : (unsigned char)0xd8; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
+
+#define x86_fp_op(inst,opc,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd8; \
+ *(inst)++ = (unsigned char)0xc0+((opc)<<3)+((index)&0x07); \
+ } while (0)
+
+#define x86_fp_op_reg(inst,opc,index,pop_stack) \
+ do { \
+ static const unsigned char map[] = { 0, 1, 2, 3, 5, 4, 7, 6, 8}; \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (pop_stack) ? (unsigned char)0xde : (unsigned char)0xdc; \
+ *(inst)++ = (unsigned char)0xc0+(map[(opc)]<<3)+((index)&0x07); \
+ } while (0)
+
+/**
+ * @x86_fp_int_op_membase
+ * Supports FPU operations between ST(0) and an integer operand in memory.
+ * The operation is encoded with the X86_FP_Opcode enum.
+ * The operand is addressed by [basereg + disp].
+ * is_int specifies whether the operand is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fp_int_op_membase(inst,opc,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_int) ? (unsigned char)0xda : (unsigned char)0xde; \
+ x86_membase_emit ((inst), (opc), (basereg), (disp)); \
+ } while (0)
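+
+/*
+ * Usage sketch (editor's illustration, assuming X86_FADD is group 0 of
+ * the X86_FP_Opcode enum): add the int32 at [esp + 8] into ST(0), i.e.
+ * "fiadd dword [esp+8]" (DA /0).
+ *
+ *   x86_fp_int_op_membase (code, X86_FADD, X86_ESP, 8, TRUE);
+ */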
+
+#define x86_fstp(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdd; \
+ *(inst)++ = (unsigned char)0xd8+(index); \
+ } while (0)
+
+#define x86_fcompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xde; \
+ *(inst)++ = (unsigned char)0xd9; \
+ } while (0)
+
+#define x86_fucompp(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xda; \
+ *(inst)++ = (unsigned char)0xe9; \
+ } while (0)
+
+#define x86_fnstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+#define x86_fnstcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+#define x86_fnstcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fldcw(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fldcw_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fchs(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+#define x86_frem(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xf8; \
+ } while (0)
+
+#define x86_fxch(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fcomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fcomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xf0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomi(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdb; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fucomip(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe8 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fld(inst,mem,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_fld_membase(inst,basereg,disp,is_double) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd : (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fld80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } while (0)
+
+#define x86_fld80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } while (0)
+
+#define x86_fild(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 5, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fild_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 5, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } \
+ } while (0)
+
+#define x86_fld_reg(inst,index) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xc0 + ((index) & 0x07); \
+ } while (0)
+
+#define x86_fldz(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xee; \
+ } while (0)
+
+#define x86_fld1(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xe8; \
+ } while (0)
+
+#define x86_fldpi(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xd9; \
+ *(inst)++ = (unsigned char)0xeb; \
+ } while (0)
+
+#define x86_fst(inst,mem,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_mem_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (mem)); \
+ } while (0)
+
+#define x86_fst_membase(inst,basereg,disp,is_double,pop_stack) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (is_double) ? (unsigned char)0xdd: (unsigned char)0xd9; \
+ x86_membase_emit ((inst), 2 + ((pop_stack) ? 1 : 0), (basereg), (disp)); \
+ } while (0)
+
+#define x86_fst80_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } while (0)
+
+
+#define x86_fst80_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } while (0)
+
+
+#define x86_fist_pop(inst,mem,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_mem_emit ((inst), 7, (mem)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_mem_emit ((inst), 3, (mem)); \
+ } \
+ } while (0)
+
+#define x86_fist_pop_membase(inst,basereg,disp,is_long) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_long)) { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 7, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 3, (basereg), (disp)); \
+ } \
+ } while (0)
+
+#define x86_fstsw(inst) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x9b; \
+ *(inst)++ = (unsigned char)0xdf; \
+ *(inst)++ = (unsigned char)0xe0; \
+ } while (0)
+
+/**
+ * @x86_fist_membase
+ * Converts the content of ST(0) to an integer and stores it at the
+ * memory location addressed by [basereg + disp].
+ * is_int specifies whether the destination is int32 (TRUE) or int16 (FALSE).
+ */
+#define x86_fist_membase(inst,basereg,disp,is_int) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ if ((is_int)) { \
+ *(inst)++ = (unsigned char)0xdb; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } else { \
+ *(inst)++ = (unsigned char)0xdf; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } \
+ } while (0)
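+
+/*
+ * Usage sketch (editor's illustration): store ST(0) as an int32 at
+ * [ebp - 8] without popping (DB /2). Rounding follows the current FPU
+ * control word; use the x86_fldcw* macros above to select truncation
+ * first if needed, or x86_fist_pop to pop as well.
+ *
+ *   x86_fist_membase (code, X86_EBP, -8, TRUE);
+ */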
+
+
+#define x86_push_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x50 + (reg); \
+ } while (0)
+
+#define x86_push_regp(inst,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_regp_emit ((inst), 6, (reg)); \
+ } while (0)
+
+#define x86_push_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 6, (mem)); \
+ } while (0)
+
+#define x86_push_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 6, (basereg), (disp)); \
+ } while (0)
+
+#define x86_push_memindex(inst,basereg,disp,indexreg,shift) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMemindexEmitPadding); \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_memindex_emit ((inst), 6, (basereg), (disp), (indexreg), (shift)); \
+ } while (0)
+
+#define x86_push_imm_template(inst) x86_push_imm (inst, 0xf0f0f0f0)
+
+#define x86_push_imm(inst,imm) \
+ do { \
+ int _imm = (int) (imm); \
+ if (x86_is_imm8 (_imm)) { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0x6A; \
+ x86_imm_emit8 ((inst), (_imm)); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x68; \
+ x86_imm_emit32 ((inst), (_imm)); \
+ } \
+ } while (0)
+
+#define x86_pop_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x58 + (reg); \
+ } while (0)
+
+#define x86_pop_mem(inst,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8f; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_pop_membase(inst,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 1 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x8f; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_pushad(inst) do { *(inst)++ = (unsigned char)0x60; } while (0)
+#define x86_pushfd(inst) do { *(inst)++ = (unsigned char)0x9c; } while (0)
+#define x86_popad(inst) do { *(inst)++ = (unsigned char)0x61; } while (0)
+#define x86_popfd(inst) do { *(inst)++ = (unsigned char)0x9d; } while (0)
+
+#define x86_loop(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe2; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loope(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe1; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_loopne(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xe0; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#if defined(TARGET_X86)
+#define x86_jump32(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* x64-specific files, so they need to be instrumented directly. */
+#define x86_jump32(inst,imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xe9; \
+ x86_imm_emit32 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+
+#define x86_jump8(inst,imm) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0xeb; \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#endif
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_jump_reg(inst,reg) do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+/* Let's hope ECX is available for these... */
+#define x86_jump_mem(inst,mem) do { \
+ x86_mov_reg_mem(inst, (X86_ECX), (mem), 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) do { \
+ x86_mov_reg_membase(inst, (X86_ECX), basereg, disp, 4); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+
+/* like x86_jump_membase, but force a 32-bit displacement */
+#define x86_jump_membase32(inst,basereg,disp) do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, X86_ECX, (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_jump_reg(inst, (X86_ECX)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+#define x86_jump_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 4, (reg)); \
+ } while (0)
+
+#define x86_jump_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 4, (mem)); \
+ } while (0)
+
+#define x86_jump_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 4, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+/*
+ * target is a pointer in our buffer.
+ */
+#define x86_jump_code_body(inst,target) \
+ do { \
+ int t; \
+ x86_codegen_pre(&(inst), 2); \
+ t = (unsigned char*)(target) - (inst) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ x86_codegen_pre(&(inst), 5); \
+ t = (unsigned char*)(target) - (inst) - 5; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
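+
+/*
+ * Editor's note: the displacement is measured from the end of the jump,
+ * so the macro subtracts the instruction length (2 for EB rel8, 5 for
+ * E9 rel32) before testing whether the short form fits. A jump to the
+ * next instruction therefore emits EB 00:
+ *
+ *   x86_jump_code_body (code, code + 2);
+ */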
+
+#if defined(__native_client_codegen__)
+#if defined(TARGET_X86)
+#define x86_jump_code(inst,target) \
+ do { \
+ guint8* jump_start = (inst); \
+ x86_jump_code_body((inst),(target)); \
+ x86_patch(jump_start, (target)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+#define x86_jump_code(inst,target) \
+ do { \
+ /* jump_code_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after the */ \
+ /* call to x64_codegen_post */ \
+ x64_codegen_pre(inst); \
+ x86_jump_code_body((inst),(target)); \
+ inst = x64_codegen_post(inst); \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#endif
+#else
+#define x86_jump_code(inst,target) \
+ do { \
+ x86_jump_code_body((inst),(target)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x86_jump_disp(inst,disp) \
+ do { \
+ int t = (disp) - 2; \
+ if (x86_is_imm8(t)) { \
+ x86_jump8 ((inst), t); \
+ } else { \
+ t -= 3; \
+ x86_jump32 ((inst), t); \
+ } \
+ } while (0)
+
+#if defined(TARGET_X86)
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 6); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* These macros are used directly from mini-amd64.c and other */
+/* x64-specific files, so they need to be instrumented directly. */
+#define x86_branch8(inst,cond,imm,is_signed) \
+ do { \
+ x64_codegen_pre(inst); \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)]; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)]; \
+ x86_imm_emit8 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#define x86_branch32(inst,cond,imm,is_signed) \
+ do { \
+ x64_codegen_pre(inst); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x10; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x10; \
+ x86_imm_emit32 ((inst), (imm)); \
+ x64_codegen_post(inst); \
+ } while (0)
+#endif
+
+#if defined(TARGET_X86)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ int offset; \
+ guint8* branch_start; \
+ x86_codegen_pre(&(inst), 2); \
+ offset = (target) - (inst) - 2; \
+ branch_start = (inst); \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ x86_codegen_pre(&(inst), 6); \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ x86_patch(branch_start, (target)); \
+ } while (0)
+#elif defined(TARGET_AMD64)
+/* This macro is used directly from mini-amd64.c and other */
+/* x64-specific files, so it needs to be instrumented directly. */
+
+#define x86_branch_body(inst,cond,target,is_signed) \
+ do { \
+ int offset = (target) - (inst) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset = (target) - (inst) - 6; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+#if defined(__native_client_codegen__)
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ /* branch_body is used twice because there are offsets */ \
+ /* calculated based on the IP, which can change after */ \
+ /* the call to x64_codegen_post */ \
+ x64_codegen_pre(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ inst = x64_codegen_post(inst); \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#else
+#define x86_branch(inst,cond,target,is_signed) \
+ do { \
+ x86_branch_body((inst),(cond),(target),(is_signed)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#endif /* TARGET_AMD64 */
+
+#define x86_branch_disp(inst,cond,disp,is_signed) \
+ do { \
+ int offset = (disp) - 2; \
+ if (x86_is_imm8 ((offset))) \
+ x86_branch8 ((inst), (cond), offset, (is_signed)); \
+ else { \
+ offset -= 4; \
+ x86_branch32 ((inst), (cond), offset, (is_signed)); \
+ } \
+ } while (0)
+
+#define x86_set_reg(inst,cond,reg,is_signed) \
+ do { \
+ g_assert (X86_IS_BYTE_REG (reg)); \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_reg_emit ((inst), 0, (reg)); \
+ } while (0)
+
+#define x86_set_mem(inst,cond,mem,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_mem_emit ((inst), 0, (mem)); \
+ } while (0)
+
+#define x86_set_membase(inst,cond,basereg,disp,is_signed) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] + 0x20; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] + 0x20; \
+ x86_membase_emit ((inst), 0, (basereg), (disp)); \
+ } while (0)
+
+#define x86_call_imm_body(inst,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xe8; \
+ x86_imm_emit32 ((inst), (int)(disp)); \
+ } while (0)
+
+#define x86_call_imm(inst,disp) \
+ do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_imm_body((inst), (disp)); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+#define x86_call_reg_internal(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0x83; /* and */ \
+ x86_reg_emit ((inst), 4, (reg)); /* reg */ \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* call */ \
+ x86_reg_emit ((inst), 2, (reg)); /* reg */ \
+ } while (0)
+
+#define x86_call_reg(inst, reg) do { \
+ x86_call_sequence_pre((inst)); \
+ x86_call_reg_internal(inst, reg); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+
+
+/* It appears that x86_call_mem() is never used, so I'm leaving it out. */
+#define x86_call_membase(inst,basereg,disp) do { \
+ x86_call_sequence_pre((inst)); \
+ /* x86_mov_reg_membase() inlined so it has a fixed size */ \
+ *(inst)++ = (unsigned char)0x8b; \
+ x86_address_byte ((inst), 2, (X86_ECX), (basereg)); \
+ x86_imm_emit32 ((inst), (disp)); \
+ x86_call_reg_internal(inst, X86_ECX); \
+ x86_call_sequence_post((inst)); \
+ } while (0)
+#else /* __native_client_codegen__ */
+#define x86_call_reg(inst,reg) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_reg_emit ((inst), 2, (reg)); \
+ } while (0)
+
+#define x86_call_mem(inst,mem) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_mem_emit ((inst), 2, (mem)); \
+ } while (0)
+
+#define x86_call_membase(inst,basereg,disp) \
+ do { \
+ *(inst)++ = (unsigned char)0xff; \
+ x86_membase_emit ((inst), 2, (basereg), (disp)); \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+
+#if defined( __native_client_codegen__ ) && defined( TARGET_X86 )
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ guint8* call_start; \
+ guint8* _aligned_start; \
+ x86_call_sequence_pre_val((inst)); \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ _aligned_start = x86_call_sequence_post_val((inst)); \
+ call_start = _aligned_start; \
+ _x86_offset = (unsigned char*)(target) - (_aligned_start); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((_aligned_start), _x86_offset); \
+ x86_patch(call_start, (target)); \
+ } while (0)
+
+#define SIZE_OF_RET 6
+#define x86_ret(inst) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and 0xffffffff, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ } while (0)
+
+/* pop return address */
+/* pop imm bytes from stack */
+/* return */
+#define x86_ret_imm(inst,imm) do { \
+ *(inst)++ = (unsigned char)0x59; /* pop ecx */ \
+ x86_alu_reg_imm ((inst), X86_ADD, X86_ESP, imm); \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x83; /* and 0xffffffff, ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+ *(inst)++ = (unsigned char)nacl_align_byte; \
+ *(inst)++ = (unsigned char)0xff; /* jmp ecx */ \
+ *(inst)++ = (unsigned char)0xe1; \
+} while (0)
+#else /* __native_client_codegen__ */
+
+#define x86_call_code(inst,target) \
+ do { \
+ int _x86_offset; \
+ _x86_offset = (unsigned char*)(target) - (inst); \
+ _x86_offset -= 5; \
+ x86_call_imm_body ((inst), _x86_offset); \
+ } while (0)
+
+#define x86_ret(inst) do { *(inst)++ = (unsigned char)0xc3; } while (0)
+
+#define x86_ret_imm(inst,imm) \
+ do { \
+ if ((imm) == 0) { \
+ x86_ret ((inst)); \
+ } else { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0xc2; \
+ x86_imm_emit16 ((inst), (imm)); \
+ } \
+ } while (0)
+#endif /* __native_client_codegen__ */
+
+#define x86_cmov_reg(inst,cond,is_signed,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_cmov_mem(inst,cond,is_signed,reg,mem) \
+ do { \
+ x86_codegen_pre(&(inst), 7); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_mem_emit ((inst), (reg), (mem)); \
+ } while (0)
+
+#define x86_cmov_membase(inst,cond,is_signed,reg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char) 0x0f; \
+ if ((is_signed)) \
+ *(inst)++ = x86_cc_signed_map [(cond)] - 0x30; \
+ else \
+ *(inst)++ = x86_cc_unsigned_map [(cond)] - 0x30; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_enter(inst,framesize) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xc8; \
+ x86_imm_emit16 ((inst), (framesize)); \
+ *(inst)++ = 0; \
+ } while (0)
+
+#define x86_leave(inst) do { *(inst)++ = (unsigned char)0xc9; } while (0)
+#define x86_sahf(inst) do { *(inst)++ = (unsigned char)0x9e; } while (0)
+
+#define x86_fsin(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfe; } while (0)
+#define x86_fcos(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xff; } while (0)
+#define x86_fabs(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe1; } while (0)
+#define x86_ftst(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe4; } while (0)
+#define x86_fxam(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xe5; } while (0)
+#define x86_fpatan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf3; } while (0)
+#define x86_fprem(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf8; } while (0)
+#define x86_fprem1(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf5; } while (0)
+#define x86_frndint(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfc; } while (0)
+#define x86_fsqrt(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xfa; } while (0)
+#define x86_fptan(inst) do { x86_codegen_pre(&(inst), 2); *(inst)++ = (unsigned char)0xd9; *(inst)++ = (unsigned char)0xf2; } while (0)
+
+#define x86_padding(inst,size) \
+ do { \
+ switch ((size)) { \
+ case 1: x86_nop ((inst)); break; \
+ case 2: *(inst)++ = 0x8b; \
+ *(inst)++ = 0xc0; break; \
+ case 3: *(inst)++ = 0x8d; *(inst)++ = 0x6d; \
+ *(inst)++ = 0x00; break; \
+ case 4: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ break; \
+ case 5: *(inst)++ = 0x8d; *(inst)++ = 0x64; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ x86_nop ((inst)); break; \
+ case 6: *(inst)++ = 0x8d; *(inst)++ = 0xad; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ break; \
+ case 7: *(inst)++ = 0x8d; *(inst)++ = 0xa4; \
+ *(inst)++ = 0x24; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; *(inst)++ = 0x00; \
+ *(inst)++ = 0x00; break; \
+ default: assert (0); \
+ } \
+ } while (0)
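+
+/*
+ * Editor's note: the filler sequences above are semantic no-ops --
+ * "mov eax, eax" (8B C0) and "lea ebp/esp, [ebp/esp + 0]" in byte- and
+ * dword-displacement forms -- so padded regions remain safely executable.
+ */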
+
+#ifdef __native_client_codegen__
+
+#define kx86NaClLengthOfCallReg 5
+#define kx86NaClLengthOfCallImm 5
+#define kx86NaClLengthOfCallMembase (kx86NaClLengthOfCallReg + 6)
+
+#endif /* __native_client_codegen__ */
+
+#define x86_prolog(inst,frame_size,reg_mask) \
+ do { \
+ unsigned i, m = 1; \
+ x86_enter ((inst), (frame_size)); \
+ for (i = 0; i < X86_NREG; ++i, m <<= 1) { \
+ if ((reg_mask) & m) \
+ x86_push_reg ((inst), i); \
+ } \
+ } while (0)
+
+#define x86_epilog(inst,reg_mask) \
+ do { \
+ unsigned i, m = 1 << X86_EDI; \
+ for (i = X86_EDI; m != 0; i--, m=m>>1) { \
+ if ((reg_mask) & m) \
+ x86_pop_reg ((inst), i); \
+ } \
+ x86_leave ((inst)); \
+ x86_ret ((inst)); \
+ } while (0)
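+
+/*
+ * Usage sketch (editor's illustration, hypothetical buffer, assuming the
+ * register enum defined earlier in this header): emit a function body
+ * that saves the callee-saved registers, opens a 16-byte frame and
+ * returns.
+ *
+ *   unsigned char buf [64], *code = buf;
+ *   int saved = (1 << X86_EBX) | (1 << X86_ESI) | (1 << X86_EDI);
+ *   x86_prolog (code, 16, saved);
+ *   x86_epilog (code, saved);
+ */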
+
+
+typedef enum {
+ X86_SSE_SQRT = 0x51,
+ X86_SSE_RSQRT = 0x52,
+ X86_SSE_RCP = 0x53,
+ X86_SSE_ADD = 0x58,
+ X86_SSE_DIV = 0x5E,
+ X86_SSE_MUL = 0x59,
+ X86_SSE_SUB = 0x5C,
+ X86_SSE_MIN = 0x5D,
+ X86_SSE_MAX = 0x5F,
+ X86_SSE_COMP = 0xC2,
+ X86_SSE_AND = 0x54,
+ X86_SSE_ANDN = 0x55,
+ X86_SSE_OR = 0x56,
+ X86_SSE_XOR = 0x57,
+ X86_SSE_UNPCKL = 0x14,
+ X86_SSE_UNPCKH = 0x15,
+
+ X86_SSE_ADDSUB = 0xD0,
+ X86_SSE_HADD = 0x7C,
+ X86_SSE_HSUB = 0x7D,
+ X86_SSE_MOVSHDUP = 0x16,
+ X86_SSE_MOVSLDUP = 0x12,
+ X86_SSE_MOVDDUP = 0x12,
+
+ X86_SSE_PAND = 0xDB,
+ X86_SSE_POR = 0xEB,
+ X86_SSE_PXOR = 0xEF,
+
+ X86_SSE_PADDB = 0xFC,
+ X86_SSE_PADDW = 0xFD,
+ X86_SSE_PADDD = 0xFE,
+ X86_SSE_PADDQ = 0xD4,
+
+ X86_SSE_PSUBB = 0xF8,
+ X86_SSE_PSUBW = 0xF9,
+ X86_SSE_PSUBD = 0xFA,
+ X86_SSE_PSUBQ = 0xFB,
+
+ X86_SSE_PMAXSB = 0x3C, /*sse41*/
+ X86_SSE_PMAXSW = 0xEE,
+ X86_SSE_PMAXSD = 0x3D, /*sse41*/
+
+ X86_SSE_PMAXUB = 0xDE,
+ X86_SSE_PMAXUW = 0x3E, /*sse41*/
+ X86_SSE_PMAXUD = 0x3F, /*sse41*/
+
+ X86_SSE_PMINSB = 0x38, /*sse41*/
+ X86_SSE_PMINSW = 0xEA,
+ X86_SSE_PMINSD = 0x39, /*sse41*/
+
+ X86_SSE_PMINUB = 0xDA,
+ X86_SSE_PMINUW = 0x3A, /*sse41*/
+ X86_SSE_PMINUD = 0x3B, /*sse41*/
+
+ X86_SSE_PAVGB = 0xE0,
+ X86_SSE_PAVGW = 0xE3,
+
+ X86_SSE_PCMPEQB = 0x74,
+ X86_SSE_PCMPEQW = 0x75,
+ X86_SSE_PCMPEQD = 0x76,
+ X86_SSE_PCMPEQQ = 0x29, /*sse41*/
+
+ X86_SSE_PCMPGTB = 0x64,
+ X86_SSE_PCMPGTW = 0x65,
+ X86_SSE_PCMPGTD = 0x66,
+ X86_SSE_PCMPGTQ = 0x37, /*sse42*/
+
+ X86_SSE_PSADBW = 0xf6,
+
+ X86_SSE_PSHUFD = 0x70,
+
+ X86_SSE_PUNPCKLBW = 0x60,
+ X86_SSE_PUNPCKLWD = 0x61,
+ X86_SSE_PUNPCKLDQ = 0x62,
+ X86_SSE_PUNPCKLQDQ = 0x6C,
+
+ X86_SSE_PUNPCKHBW = 0x68,
+ X86_SSE_PUNPCKHWD = 0x69,
+ X86_SSE_PUNPCKHDQ = 0x6A,
+ X86_SSE_PUNPCKHQDQ = 0x6D,
+
+ X86_SSE_PACKSSWB = 0x63,
+ X86_SSE_PACKSSDW = 0x6B,
+
+ X86_SSE_PACKUSWB = 0x67,
+ X86_SSE_PACKUSDW = 0x2B, /*sse41*/
+
+ X86_SSE_PADDUSB = 0xDC,
+ X86_SSE_PADDUSW = 0xDD,
+ X86_SSE_PSUBUSB = 0xD8,
+ X86_SSE_PSUBUSW = 0xD9,
+
+ X86_SSE_PADDSB = 0xEC,
+ X86_SSE_PADDSW = 0xED,
+ X86_SSE_PSUBSB = 0xE8,
+ X86_SSE_PSUBSW = 0xE9,
+
+ X86_SSE_PMULLW = 0xD5,
+ X86_SSE_PMULLD = 0x40, /*sse41*/
+ X86_SSE_PMULHUW = 0xE4,
+ X86_SSE_PMULHW = 0xE5,
+ X86_SSE_PMULUDQ = 0xF4,
+
+ X86_SSE_PMOVMSKB = 0xD7,
+
+ X86_SSE_PSHIFTW = 0x71,
+ X86_SSE_PSHIFTD = 0x72,
+ X86_SSE_PSHIFTQ = 0x73,
+ X86_SSE_SHR = 2,
+ X86_SSE_SAR = 4,
+ X86_SSE_SHL = 6,
+
+ X86_SSE_PSRLW_REG = 0xD1,
+ X86_SSE_PSRAW_REG = 0xE1,
+ X86_SSE_PSLLW_REG = 0xF1,
+
+ X86_SSE_PSRLD_REG = 0xD2,
+ X86_SSE_PSRAD_REG = 0xE2,
+ X86_SSE_PSLLD_REG = 0xF2,
+
+ X86_SSE_PSRLQ_REG = 0xD3,
+ X86_SSE_PSLLQ_REG = 0xF3,
+
+ X86_SSE_PREFETCH = 0x18,
+ X86_SSE_MOVNTPS = 0x2B,
+ X86_SSE_MOVHPD_REG_MEMBASE = 0x16,
+ X86_SSE_MOVHPD_MEMBASE_REG = 0x17,
+
+ X86_SSE_MOVSD_REG_MEMBASE = 0x10,
+ X86_SSE_MOVSD_MEMBASE_REG = 0x11,
+
+ X86_SSE_PINSRB = 0x20, /*sse41*/
+ X86_SSE_PINSRW = 0xC4,
+ X86_SSE_PINSRD = 0x22, /*sse41*/
+
+ X86_SSE_PEXTRB = 0x14, /*sse41*/
+ X86_SSE_PEXTRW = 0xC5,
+ X86_SSE_PEXTRD = 0x16, /*sse41*/
+
+ X86_SSE_SHUFP = 0xC6,
+
+ X86_SSE_CVTDQ2PD = 0xE6,
+ X86_SSE_CVTDQ2PS = 0x5B,
+ X86_SSE_CVTPD2DQ = 0xE6,
+ X86_SSE_CVTPD2PS = 0x5A,
+ X86_SSE_CVTPS2DQ = 0x5B,
+ X86_SSE_CVTPS2PD = 0x5A,
+ X86_SSE_CVTTPD2DQ = 0xE6,
+ X86_SSE_CVTTPS2DQ = 0x5B,
+} X86_SSE_Opcode;
+
+
+/* minimal SSE* support */
+#define x86_movsd_reg_membase(inst,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (dreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_cvttsd2si(inst,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xf2; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x2c; \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_reg_membase(inst,opc,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm8); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg_imm8(inst,opc,dreg,reg, imm8) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg_imm8 ((inst), (opc), (dreg), (reg), (imm8)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase(inst,opc,dreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ x86_sse_alu_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_reg_imm(inst,opc,dreg,reg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+#define x86_sse_alu_pd_reg_membase_imm(inst,opc,dreg,basereg,disp,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4 + kMaxMembaseEmitPadding); \
+ x86_sse_alu_pd_reg_membase ((inst), (opc), (dreg),(basereg), (disp)); \
+ *(inst)++ = (unsigned char)(imm); \
+ } while (0)
+
+
+#define x86_sse_alu_ps_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ps_reg_reg_imm(inst,opc,dreg,reg, imm) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ *(inst)++ = (unsigned char)imm; \
+ } while (0)
+
+
+#define x86_sse_alu_sd_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_sd_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF2; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+#define x86_sse_alu_ss_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_reg_reg ((inst), (opc), (dreg), (reg)); \
+ } while (0)
+
+#define x86_sse_alu_ss_membase_reg(inst,opc,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0xF3; \
+ x86_sse_alu_membase_reg ((inst), (opc), (basereg), (disp), (reg)); \
+ } while (0)
+
+
+
+#define x86_sse_alu_sse41_reg_reg(inst,opc,dreg,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0F; \
+ *(inst)++ = (unsigned char)0x38; \
+ *(inst)++ = (unsigned char)(opc); \
+ x86_reg_emit ((inst), (dreg), (reg)); \
+ } while (0)
+
+#define x86_movups_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x10; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movups_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x11; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_reg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_membase_reg(inst,basereg,disp,reg) \
+ do { \
+ x86_codegen_pre(&(inst), 2 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x29; \
+ x86_membase_emit ((inst), (reg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_movaps_reg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 3); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x28; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+
+#define x86_movd_reg_xreg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x7e; \
+ x86_reg_emit ((inst), (sreg), (dreg)); \
+ } while (0)
+
+#define x86_movd_xreg_reg(inst,dreg,sreg) \
+ do { \
+ x86_codegen_pre(&(inst), 4); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ } while (0)
+
+#define x86_movd_xreg_membase(inst,sreg,basereg,disp) \
+ do { \
+ x86_codegen_pre(&(inst), 3 + kMaxMembaseEmitPadding); \
+ *(inst)++ = (unsigned char)0x66; \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x6e; \
+ x86_membase_emit ((inst), (sreg), (basereg), (disp)); \
+ } while (0)
+
+#define x86_pshufw_reg_reg(inst,dreg,sreg,mask,high_words) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ *(inst)++ = (unsigned char)((high_words) ? 0xF3 : 0xF2); \
+ *(inst)++ = (unsigned char)0x0f; \
+ *(inst)++ = (unsigned char)0x70; \
+ x86_reg_emit ((inst), (dreg), (sreg)); \
+ *(inst)++ = (unsigned char)mask; \
+ } while (0)
+
+#define x86_sse_shift_reg_imm(inst,opc,mode, dreg,imm) \
+ do { \
+ x86_codegen_pre(&(inst), 5); \
+ x86_sse_alu_pd_reg_reg (inst, opc, mode, dreg); \
+ x86_imm_emit8 ((inst), (imm)); \
+ } while (0)
+
+#define x86_sse_shift_reg_reg(inst,opc,dreg,sreg) \
+ do { \
+ x86_sse_alu_pd_reg_reg (inst, opc, dreg, sreg); \
+ } while (0)
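+
+/*
+ * Usage sketch (editor's illustration, assuming the XMM register enum
+ * defined earlier in this header): "psrld xmm1, 4" -- shift each dword
+ * right by 4 bits -- encodes as 66 0F 72 D1 04.
+ *
+ *   x86_sse_shift_reg_imm (code, X86_SSE_PSHIFTD, X86_SSE_SHR, X86_XMM1, 4);
+ */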
+
+
+
+#endif // X86_H
+