summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorHaruki Hasegawa <h6a.h4i.0@gmail.com>2016-05-05 13:24:55 +0900
committerHaruki Hasegawa <h6a.h4i.0@gmail.com>2016-05-05 13:24:55 +0900
commitda3213cf045e0c7c4971d8b44272d1d86d689ceb (patch)
tree33e17e0a166f03307ebf11e8ab2891ae1ab90f61
parentfa1780c68593762b1e4bdbc46d83912db3eba27a (diff)
parent944d14c9151f6b20145de0cdae38e366e73c9432 (diff)
downloadffts-da3213cf045e0c7c4971d8b44272d1d86d689ceb.zip
ffts-da3213cf045e0c7c4971d8b44272d1d86d689ceb.tar.gz
Merge remote-tracking branch 'linkotec/master'
-rw-r--r--.travis.yml12
-rw-r--r--CMakeLists.txt462
-rw-r--r--README27
-rw-r--r--README.md35
-rw-r--r--include/ffts.h96
-rw-r--r--src/arch/.gitignore6
-rw-r--r--src/arch/ChangeLog4805
-rw-r--r--src/arch/LICENSE21
-rw-r--r--src/arch/Makefile.am11
-rw-r--r--src/arch/README7
-rw-r--r--src/arch/arm/.gitattributes1
-rw-r--r--src/arch/arm/.gitignore15
-rw-r--r--src/arch/arm/Makefile.am27
-rw-r--r--src/arch/arm/arm-codegen.c193
-rw-r--r--src/arch/arm/arm-codegen.h1127
-rw-r--r--src/arch/arm/arm-dis.c509
-rw-r--r--src/arch/arm/arm-dis.h41
-rw-r--r--src/arch/arm/arm-vfp-codegen.h247
-rw-r--r--src/arch/arm/arm-wmmx.h177
-rw-r--r--src/arch/arm/cmp_macros.th56
-rw-r--r--src/arch/arm/dpi_macros.th112
-rwxr-xr-xsrc/arch/arm/dpiops.sh30
-rw-r--r--src/arch/arm/mov_macros.th121
-rw-r--r--src/arch/arm/tramp.c710
-rw-r--r--src/arch/arm/vfp_macros.th15
-rw-r--r--src/arch/arm/vfpm_macros.th14
-rwxr-xr-xsrc/arch/arm/vfpops.sh24
-rw-r--r--src/arch/arm64/.gitignore6
-rw-r--r--src/arch/arm64/Makefile.am0
-rw-r--r--src/arch/arm64/arm64-codegen.h3
-rw-r--r--src/arch/ia64/.gitignore2
-rw-r--r--src/arch/ia64/Makefile.am3
-rw-r--r--src/arch/ia64/codegen.c861
-rw-r--r--src/arch/ia64/ia64-codegen.h3183
-rw-r--r--src/arch/mips/.gitignore6
-rw-r--r--src/arch/mips/Makefile.am8
-rw-r--r--src/arch/mips/mips-codegen.h435
-rw-r--r--src/arch/mips/test.c159
-rw-r--r--src/arch/ppc/.gitignore7
-rw-r--r--src/arch/ppc/Makefile.am1
-rw-r--r--src/arch/ppc/ppc-codegen.h953
-rw-r--r--src/arch/s390x/.gitignore6
-rw-r--r--src/arch/s390x/ChangeLog35
-rw-r--r--src/arch/s390x/Makefile.am7
-rw-r--r--src/arch/s390x/s390x-codegen.h997
-rw-r--r--src/arch/s390x/tramp.c1149
-rw-r--r--src/arch/sparc/.gitignore3
-rw-r--r--src/arch/sparc/Makefile.am7
-rw-r--r--src/arch/sparc/sparc-codegen.h955
-rw-r--r--src/arch/sparc/test.c123
-rw-r--r--src/arch/sparc/tramp.c1080
-rw-r--r--src/arch/x64/.gitignore4
-rw-r--r--src/arch/x64/Makefile.am2
-rw-r--r--src/arch/x64/x64-codegen.h1938
-rw-r--r--src/arch/x86/.gitignore6
-rw-r--r--src/arch/x86/Makefile.am1
-rw-r--r--src/arch/x86/x86-codegen.h2647
-rw-r--r--src/codegen.c1167
-rw-r--r--src/codegen.h28
-rw-r--r--src/codegen_arm.h132
-rw-r--r--src/codegen_sse.h1820
-rw-r--r--src/ffts.c875
-rw-r--r--src/ffts.h186
-rw-r--r--src/ffts_attributes.h111
-rw-r--r--src/ffts_dd.h230
-rw-r--r--src/ffts_internal.h211
-rw-r--r--src/ffts_nd.c440
-rw-r--r--src/ffts_nd.h89
-rw-r--r--src/ffts_real.c831
-rw-r--r--src/ffts_real.h83
-rw-r--r--src/ffts_real_nd.c410
-rw-r--r--src/ffts_real_nd.h84
-rw-r--r--src/ffts_small.c157
-rw-r--r--src/ffts_small.h14
-rw-r--r--src/ffts_static.c1292
-rw-r--r--src/ffts_static.h120
-rw-r--r--src/ffts_transpose.c194
-rw-r--r--src/ffts_transpose.h46
-rw-r--r--src/ffts_trig.c628
-rw-r--r--src/ffts_trig.h56
-rw-r--r--src/macros-alpha.h325
-rw-r--r--src/macros-neon.h168
-rw-r--r--src/macros-sse.h141
-rw-r--r--src/macros.h308
-rw-r--r--src/neon.h89
-rw-r--r--src/neon.s1508
-rw-r--r--src/neon_float.h1127
-rw-r--r--src/neon_static.s1255
-rw-r--r--src/neon_static_f.s956
-rw-r--r--src/neon_static_i.s955
-rw-r--r--src/patterns.c209
-rw-r--r--src/patterns.h547
-rw-r--r--src/sequitur.h448
-rw-r--r--src/sse.s878
-rw-r--r--src/types.h29
-rw-r--r--src/vfp.s99
-rw-r--r--tests/test.c247
97 files changed, 34019 insertions, 7962 deletions
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..1e70e8e
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,12 @@
+language: c
+os:
+ - linux
+ - osx
+addons:
+ apt:
+ packages:
+ - cmake
+ sources:
+ - kubuntu-backports
+script:
+ - mkdir build && cd build && cmake .. && cmake --build .
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..2028c03
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,462 @@
+cmake_minimum_required(VERSION 2.8.12 FATAL_ERROR)
+
+project(ffts C ASM)
+
+# TODO: to support AutoConfigure building, this should came from "template" file
+set(FFTS_MAJOR 0)
+set(FFTS_MINOR 9)
+set(FFTS_MICRO 0)
+
+set(FFTS_VERSION "ffts-${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO}")
+
+set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
+set_property(GLOBAL PROPERTY USE_FOLDERS ON)
+
+# default build type is Debug which means no optimization
+if(NOT CMAKE_BUILD_TYPE)
+ set(CMAKE_BUILD_TYPE "Release")
+endif(NOT CMAKE_BUILD_TYPE)
+
+# common options
+option(ENABLE_NEON
+ "Enables the use of NEON instructions." OFF
+)
+
+option(ENABLE_VFP
+ "Enables the use of VFP instructions." OFF
+)
+
+option(DISABLE_DYNAMIC_CODE
+ "Disables the use of dynamic machine code generation." OFF
+)
+
+option(GENERATE_POSITION_INDEPENDENT_CODE
+ "Generate position independent code" OFF
+)
+
+option(ENABLE_SHARED
+ "Enable building a shared library." OFF
+)
+
+option(ENABLE_STATIC
+ "Enable building a static library." ON
+)
+
+include(CheckCSourceCompiles)
+include(CheckCSourceRuns)
+include(CheckIncludeFile)
+
+# Ensure defined when building FFTS (as opposed to using it from
+# another project). Used to export functions from Windows DLL.
+add_definitions(-DFFTS_BUILD)
+
+# check existence of various headers
+check_include_file(malloc.h HAVE_MALLOC_H)
+check_include_file(stdint.h HAVE_STDINT_H)
+check_include_file(stdlib.h HAVE_STDLIB_H)
+check_include_file(string.h HAVE_STRING_H)
+check_include_file(sys/mman.h HAVE_SYS_MMAN_H)
+check_include_file(unistd.h HAVE_UNISTD_H)
+
+if(HAVE_MALLOC_H)
+ add_definitions(-DHAVE_MALLOC_H)
+endif(HAVE_MALLOC_H)
+
+if(HAVE_STDINT_H)
+ add_definitions(-DHAVE_STDINT_H)
+endif(HAVE_STDINT_H)
+
+if(HAVE_STDLIB_H)
+ add_definitions(-DHAVE_STDLIB_H)
+endif(HAVE_STDLIB_H)
+
+if(HAVE_STRING_H)
+ add_definitions(-DHAVE_STRING_H)
+endif(HAVE_STRING_H)
+
+if(HAVE_SYS_MMAN_H)
+ add_definitions(-DHAVE_SYS_MMAN_H)
+endif(HAVE_SYS_MMAN_H)
+
+if(HAVE_UNISTD_H)
+ add_definitions(-DHAVE_UNISTD_H)
+endif(HAVE_UNISTD_H)
+
+# backup flags
+set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+
+# Determinate if we are cross-compiling
+if(NOT CMAKE_CROSSCOMPILING)
+ if(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
+ # Determinate ARM architecture
+
+ # Try to execute quietly without messages
+ set(CMAKE_REQUIRED_QUIET 1)
+
+ # The test for ARM architecture
+ set(TEST_SOURCE_CODE "int main() { return 0; }")
+
+ # GCC documentation says "native" is only supported on Linux, but let's try
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=native")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+ # Fallback trying generic ARMv7
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv7-a")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+ # Fallback trying generic ARMv6
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -march=armv6")
+ check_c_source_runs("${TEST_SOURCE_CODE}" GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+
+ if(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+ message(WARNING "FFTS failed to determinate ARM architecture")
+ set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
+ else()
+ message("FFTS is build using 'march=armv6'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv6")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv6")
+ endif(NOT GCC_MARCH_ARMV6_FLAG_SUPPORTED)
+ else()
+ message("FFTS is build using 'march=armv7-a'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=armv7-a")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=armv7-a")
+ endif(NOT GCC_MARCH_ARMV7A_FLAG_SUPPORTED)
+ else()
+ message("FFTS is build using 'march=native'")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -march=native")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=native")
+ endif(NOT GCC_MARCH_NATIVE_FLAG_SUPPORTED)
+
+ # Determinate what floating-point hardware (or hardware emulation) is available
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+
+ # The test for ARM NEON support
+ set(TEST_SOURCE_CODE "
+ #include <arm_neon.h>
+ int main()
+ {
+ float32x4_t v;
+ float zeros[4] = {0.0f, 0.0f, 0.0f, 0.0f};
+ v = vld1q_f32(zeros);
+ return 0;
+ }"
+ )
+
+ # Test running with -mfpu=neon and -mfloat-abi=hard
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=hard")
+ check_c_source_runs("${TEST_SOURCE_CODE}" NEON_HARDFP_SUPPORTED)
+
+ if(NOT NEON_HARDFP_SUPPORTED)
+ # Test running with -mfpu=neon and -mfloat-abi=softfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=neon -mfloat-abi=softfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" NEON_SOFTFP_SUPPORTED)
+
+ if(NOT NEON_SOFTFP_SUPPORTED)
+ if(ENABLE_NEON)
+ message(FATAL_ERROR "FFTS cannot enable NEON on this platform")
+ endif(ENABLE_NEON)
+ else()
+ message("FFTS is using 'neon' FPU and 'softfp' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=softfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=softfp")
+ set(ENABLE_NEON ON)
+ endif(NOT NEON_SOFTFP_SUPPORTED)
+ else()
+ message("FFTS is using 'neon' FPU and 'hard' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=neon -mfloat-abi=hard")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=neon -mfloat-abi=hard")
+ set(ENABLE_NEON ON)
+ endif(NOT NEON_HARDFP_SUPPORTED)
+
+ # Fallback using VFP if NEON is not supported
+ if(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED)
+ # Test for ARM VFP support
+ set(TEST_SOURCE_CODE "
+ double sum(double a, double b)
+ {
+ return a + b;
+ }
+ int main()
+ {
+ double s1, s2, v1 = 1.0, v2 = 2.0, v3 = 1.0e-322;
+ s1 = sum(v1, v2);
+ s2 = sum(v3, v3);
+ return 0;
+ }"
+ )
+
+ # Test running with -mfpu=vfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfpu=vfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" VFP_SUPPORTED)
+
+ if(NOT VFP_SUPPORTED)
+ # Fallback using emulation if VFP is not supported
+ if(ENABLE_VFP)
+ message(FATAL_ERROR "FFTS cannot enable VFP on this platform")
+ endif(ENABLE_VFP)
+
+ message(WARNING "FFTS is using 'soft' FPU")
+ else()
+ message("FFTS is using 'vfp' FPU")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfpu=vfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfpu=vfp")
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ set(ENABLE_VFP ON)
+ endif(NOT VFP_SUPPORTED)
+
+ # Test running with -mfloat-abi=hard
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=hard")
+
+ # Use the same test as before
+ check_c_source_runs("${TEST_SOURCE_CODE}" HARDFP_SUPPORTED)
+
+ if(NOT HARDFP_SUPPORTED)
+ # Test running with -mfloat-abi=softfp
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -mfloat-abi=softfp")
+ check_c_source_runs("${TEST_SOURCE_CODE}" SOFTFP_SUPPORTED)
+
+ if(NOT SOFTFP_SUPPORTED)
+ # Most likely development libraries are missing
+ message(WARNING "FFTS is using 'soft' float ABI")
+ else()
+ message("FFTS is using 'softfp' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=softfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
+ endif(NOT SOFTFP_SUPPORTED)
+ else()
+ message("FFTS is using 'hard' float ABI")
+ set(CMAKE_ASM_FLAGS "${CMAKE_ASM_FLAGS} -mfloat-abi=hard")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard")
+ endif(NOT HARDFP_SUPPORTED)
+ endif(NOT NEON_HARDFP_SUPPORTED AND NOT NEON_SOFTFP_SUPPORTED)
+ else()
+ # enable SSE code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE intrinsics
+ check_include_file(xmmintrin.h HAVE_XMMINTRIN_H)
+ if(HAVE_XMMINTRIN_H)
+ add_definitions(-DHAVE_SSE)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ endif(HAVE_XMMINTRIN_H)
+
+ # enable SSE2 code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse2")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE2 intrinsics
+ check_include_file(emmintrin.h HAVE_EMMINTRIN_H)
+ if(HAVE_EMMINTRIN_H)
+ add_definitions(-DHAVE_SSE2)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ endif(HAVE_EMMINTRIN_H)
+
+ # enable SSE3 code generation
+ if(CMAKE_COMPILER_IS_GNUCC)
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS_SAVE} -msse3")
+ endif(CMAKE_COMPILER_IS_GNUCC)
+
+ # check if the platform has support for SSE3 intrinsics
+ check_include_file(pmmintrin.h HAVE_PMMINTRIN_H)
+ if(HAVE_PMMINTRIN_H)
+ add_definitions(-DHAVE_PMMINTRIN_H)
+ add_definitions(-DHAVE_SSE3)
+ set(CMAKE_REQUIRED_FLAGS_SAVE ${CMAKE_REQUIRED_FLAGS})
+ else()
+ # check if the platform has specific intrinsics
+ check_include_file(intrin.h HAVE_INTRIN_H)
+ if(HAVE_INTRIN_H)
+ add_definitions(-DHAVE_INTRIN_H)
+
+ check_c_source_compiles("
+ #include<intrin.h>
+ int main(int argc, char** argv)
+ {
+ (void) argv;
+ (void) argc;
+ return _mm_movemask_ps(_mm_moveldup_ps(_mm_set_ss(1.0f)));
+ }" HAVE__MM_MOVELDUP_PS
+ )
+
+ if(HAVE__MM_MOVELDUP_PS)
+ # assume that we have all SSE3 intrinsics
+ add_definitions(-DHAVE_SSE3)
+ endif(HAVE__MM_MOVELDUP_PS)
+ endif(HAVE_INTRIN_H)
+ endif(HAVE_PMMINTRIN_H)
+ endif(CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
+else()
+ # TODO: Add detections for compiler support and headers
+endif(NOT CMAKE_CROSSCOMPILING)
+
+# restore flags
+set(CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
+
+# compiler settings
+if(MSVC)
+ # enable all warnings but also disable some..
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4 /wd4127")
+
+ # mark debug versions
+ set(CMAKE_DEBUG_POSTFIX "d")
+
+ add_definitions(-D_USE_MATH_DEFINES)
+elseif(CMAKE_COMPILER_IS_GNUCC)
+ include(CheckCCompilerFlag)
+ include(CheckLibraryExists)
+
+ # enable all warnings
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra")
+
+ # check if we can control visibility of symbols
+ check_c_compiler_flag(-fvisibility=hidden HAVE_GCC_VISIBILITY)
+ if(HAVE_GCC_VISIBILITY)
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fvisibility=hidden")
+ add_definitions(-DHAVE_GCC_VISIBILITY)
+ endif(HAVE_GCC_VISIBILITY)
+
+ # some systems need libm for the math functions to work
+ check_library_exists(m pow "" HAVE_LIBM)
+ if(HAVE_LIBM)
+ list(APPEND CMAKE_REQUIRED_LIBRARIES m)
+ list(APPEND FFTS_EXTRA_LIBRARIES m)
+ endif(HAVE_LIBM)
+
+ if(HAVE_PMMINTRIN_H)
+ add_definitions(-msse3)
+ elseif(HAVE_EMMINTRIN_H)
+ add_definitions(-msse2)
+ elseif(HAVE_XMMINTRIN_H)
+ add_definitions(-msse)
+ endif(HAVE_PMMINTRIN_H)
+endif(MSVC)
+
+include_directories(include)
+include_directories(src)
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+set(FFTS_HEADERS
+ include/ffts.h
+)
+
+set(FFTS_SOURCES
+ src/ffts_attributes.h
+ src/ffts.c
+ src/ffts_internal.h
+ src/ffts_nd.c
+ src/ffts_nd.h
+ src/ffts_real.h
+ src/ffts_real.c
+ src/ffts_real_nd.c
+ src/ffts_real_nd.h
+ src/ffts_transpose.c
+ src/ffts_transpose.h
+ src/ffts_trig.c
+ src/ffts_trig.h
+ src/ffts_static.c
+ src/ffts_static.h
+ src/macros.h
+ src/patterns.h
+ src/types.h
+)
+
+if(ENABLE_NEON)
+ list(APPEND FFTS_SOURCES
+ src/neon.s
+ )
+
+ if(DISABLE_DYNAMIC_CODE)
+ list(APPEND FFTS_SOURCES
+ src/neon_static.s
+ )
+ endif(DISABLE_DYNAMIC_CODE)
+
+ add_definitions(-DHAVE_NEON)
+elseif(ENABLE_VFP)
+ if(NOT DISABLE_DYNAMIC_CODE)
+ list(APPEND FFTS_SOURCES
+ src/vfp.s
+ )
+ endif(NOT DISABLE_DYNAMIC_CODE)
+
+ add_definitions(-DHAVE_VFP)
+elseif(HAVE_XMMINTRIN_H)
+ add_definitions(-DHAVE_SSE)
+
+ list(APPEND FFTS_SOURCES
+ src/macros-sse.h
+ )
+
+ if(NOT DISABLE_DYNAMIC_CODE)
+ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ list(APPEND FFTS_SOURCES
+ src/codegen_sse.h
+ )
+ else()
+ message(WARNING "Dynamic code is only supported with x64, disabling dynamic code.")
+ set(DISABLE_DYNAMIC_CODE ON)
+ endif(CMAKE_SIZEOF_VOID_P EQUAL 8)
+ endif(NOT DISABLE_DYNAMIC_CODE)
+endif(ENABLE_NEON)
+
+if(DISABLE_DYNAMIC_CODE)
+ add_definitions(-DDYNAMIC_DISABLED)
+else()
+ list(APPEND FFTS_SOURCES
+ src/codegen.c
+ src/codegen.h
+ )
+endif(DISABLE_DYNAMIC_CODE)
+
+if(GENERATE_POSITION_INDEPENDENT_CODE)
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+endif(GENERATE_POSITION_INDEPENDENT_CODE)
+
+if(ENABLE_SHARED)
+ add_library(ffts_shared SHARED
+ ${FFTS_HEADERS}
+ ${FFTS_SOURCES}
+ )
+
+ # On unix-like platforms the library is called "libffts.so" and on Windows "ffts.dll"
+ set_target_properties(ffts_shared PROPERTIES
+ DEFINE_SYMBOL FFTS_SHARED
+ OUTPUT_NAME ffts
+ VERSION ${FFTS_MAJOR}.${FFTS_MINOR}.${FFTS_MICRO}
+ )
+endif(ENABLE_SHARED)
+
+if(ENABLE_STATIC)
+ add_library(ffts_static STATIC
+ ${FFTS_HEADERS}
+ ${FFTS_SOURCES}
+ )
+
+ if(UNIX)
+ # On unix-like platforms the library is called "libffts.a"
+ set_target_properties(ffts_static PROPERTIES OUTPUT_NAME ffts)
+ endif(UNIX)
+endif(ENABLE_STATIC)
+
+if(ENABLE_STATIC OR ENABLE_SHARED)
+ add_executable(ffts_test
+ tests/test.c
+ )
+
+ # link with static library by default
+ if(ENABLE_STATIC)
+ add_library(ffts ALIAS ffts_static)
+ else()
+ add_library(ffts ALIAS ffts_shared)
+ endif(ENABLE_STATIC)
+
+ target_link_libraries(ffts_test
+ ffts
+ ${FFTS_EXTRA_LIBRARIES}
+ )
+endif(ENABLE_STATIC OR ENABLE_SHARED)
diff --git a/README b/README
deleted file mode 100644
index d2f320b..0000000
--- a/README
+++ /dev/null
@@ -1,27 +0,0 @@
-FFTS -- The Fastest Fourier Transform in the South
-by Anthony Blake <anthonix@me.com>
-
-To build for Android, edit and run build_android.sh
-
-To build for iOS, edit and run build_iphone.sh
-
-To build for Linux or OS X on x86, run
-./configure --enable-sse --enable-single --prefix=/usr/local
-make
-make install
-
-FFTS dynamically generates code at runtime. This can be disabled with
---disable-dynamic-code
-
-For JNI targets: --enable-jni will build the jni stuff automatically for
-the host target, and --enable-shared must also be added manually for it to
-work.
-
-If you like FFTS, please show your support by sending a postcard to:
-
-Anthony Blake
-Department of Computer Science
-The University of Waikato
-Private Bag 3105
-Hamilton 3240
-NEW ZEALAND
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..50fb60e
--- /dev/null
+++ b/README.md
@@ -0,0 +1,35 @@
+# FFTS -- The Fastest Fourier Transform in the South
+
+[![Build Status](https://travis-ci.org/linkotec/ffts.svg?branch=master)](https://travis-ci.org/linkotec/ffts)
+
+To build for Android, edit and run build_android.sh
+
+To build for iOS, edit and run build_iphone.sh
+
+To build for Linux or OS X on x86, run
+ ./configure --enable-sse --enable-single --prefix=/usr/local
+ make
+ make install
+
+Optionally build for Windows and Linux with CMake, run
+ mkdir build
+ cd build
+ cmake ..
+
+FFTS dynamically generates code at runtime. This can be disabled with
+--disable-dynamic-code
+
+Note that 32 bit x86 dynamic machine code generation is not supported at the moment.
+
+For JNI targets: --enable-jni will build the jni stuff automatically for
+the host target, and --enable-shared must also be added manually for it to
+work.
+
+If you like FFTS, please show your support by sending a postcard to:
+
+Anthony Blake<br>
+Department of Computer Science<br>
+The University of Waikato<br>
+Private Bag 3105<br>
+Hamilton 3240<br>
+NEW ZEALAND
diff --git a/include/ffts.h b/include/ffts.h
index 63173bb..cc85a88 100644
--- a/include/ffts.h
+++ b/include/ffts.h
@@ -1,7 +1,7 @@
/*
-
+
This file is part of FFTS.
-
+
Copyright (c) 2012, Anthony M. Blake
All rights reserved.
@@ -29,44 +29,82 @@
*/
-#ifndef __FFTS_H__
-#define __FFTS_H__
+#ifndef FFTS_H
+#define FFTS_H
+
+#if defined (_MSC_VER) && (_MSC_VER >= 1020)
+#pragma once
+#endif
-#include <stdio.h>
-#include <stdlib.h>
-#include <math.h>
-#include <stdint.h>
#include <stddef.h>
#ifdef __cplusplus
-extern "C"
-{
-#endif /* __cplusplus */
+extern "C" {
+#endif
-#define POSITIVE_SIGN 1
-#define NEGATIVE_SIGN -1
+#if (defined(_WIN32) || defined(WIN32)) && defined(FFTS_SHARED)
+# ifdef FFTS_BUILD
+# define FFTS_API __declspec(dllexport)
+# else
+# define FFTS_API __declspec(dllimport)
+# endif
+#else
+# if (__GNUC__ >= 4) || defined(HAVE_GCC_VISIBILITY)
+# define FFTS_API __attribute__ ((visibility("default")))
+# else
+# define FFTS_API
+# endif
+#endif
+
+/* The direction of the transform
+ (i.e, the sign of the exponent in the transform.)
+*/
+#define FFTS_FORWARD (-1)
+#define FFTS_BACKWARD (+1)
struct _ffts_plan_t;
typedef struct _ffts_plan_t ffts_plan_t;
-ffts_plan_t *ffts_init_1d(size_t N, int sign);
-ffts_plan_t *ffts_init_2d(size_t N1, size_t N2, int sign);
-ffts_plan_t *ffts_init_nd(int rank, size_t *Ns, int sign);
+/* Complex data is stored in the interleaved format
+ (i.e, the real and imaginary parts composing each
+ element of complex data are stored adjacently in memory)
-// For real transforms, sign == -1 implies a real-to-complex forwards tranform,
-// and sign == 1 implies a complex-to-real backwards transform
-// The output of a real-to-complex transform is N/2+1 complex numbers, where the
-// redundant outputs have been omitted.
-ffts_plan_t *ffts_init_1d_real(size_t N, int sign);
-ffts_plan_t *ffts_init_2d_real(size_t N1, size_t N2, int sign);
-ffts_plan_t *ffts_init_nd_real(int rank, size_t *Ns, int sign);
+ The multi-dimensional arrays passed are expected to be
+ stored as a single contiguous block in row-major order
+*/
+FFTS_API ffts_plan_t*
+ffts_init_1d(size_t N, int sign);
-void ffts_execute(ffts_plan_t * , const void *input, void *output);
-void ffts_free(ffts_plan_t *);
+FFTS_API ffts_plan_t*
+ffts_init_2d(size_t N1, size_t N2, int sign);
-#ifdef __cplusplus
-} /* extern "C" */
-#endif /* __cplusplus */
+FFTS_API ffts_plan_t*
+ffts_init_nd(int rank, size_t *Ns, int sign);
+/* For real transforms, sign == FFTS_FORWARD implies a real-to-complex
+ forwards tranform, and sign == FFTS_BACKWARD implies a complex-to-real
+ backwards transform.
+
+ The output of a real-to-complex transform is N/2+1 complex numbers,
+ where the redundant outputs have been omitted.
+*/
+FFTS_API ffts_plan_t*
+ffts_init_1d_real(size_t N, int sign);
+
+FFTS_API ffts_plan_t*
+ffts_init_2d_real(size_t N1, size_t N2, int sign);
+
+FFTS_API ffts_plan_t*
+ffts_init_nd_real(int rank, size_t *Ns, int sign);
+
+FFTS_API void
+ffts_execute(ffts_plan_t *p, const void *input, void *output);
+
+FFTS_API void
+ffts_free(ffts_plan_t *p);
+
+#ifdef __cplusplus
+}
#endif
-// vim: set autoindent noexpandtab tabstop=3 shiftwidth=3:
+
+#endif /* FFTS_H */
diff --git a/src/arch/.gitignore b/src/arch/.gitignore
new file mode 100644
index 0000000..16c9840
--- /dev/null
+++ b/src/arch/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.la
+/*.lo
diff --git a/src/arch/ChangeLog b/src/arch/ChangeLog
new file mode 100644
index 0000000..c42aa63
--- /dev/null
+++ b/src/arch/ChangeLog
@@ -0,0 +1,4805 @@
+commit e8fa461503cf681fd7f6fffdbe94346cb4a0b94f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 13 13:56:18 2014 -0400
+
+ [runtime] Remove an unused interpreter file.
+
+commit b8e69265771d2d730847add35620628ff003aed1
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Sep 9 09:14:37 2014 -0400
+
+ [cleanup] Remove more old files.
+
+commit 69d89956fcc24cec955246588269cb7c8012b7cb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Sep 1 13:25:07 2014 -0400
+
+ [runtime] Remove the interpreter.
+
+commit a9db0d5b41d17cb7ff5788a63ce0eee1e01652b3
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Tue Jun 3 11:52:00 2014 -0400
+
+ Architectural level set to z10 instruction set
+
+commit edeeadda807c9189ad6b7cdd0f221c355ad95e52
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue Apr 29 16:56:12 2014 +0200
+
+ Add .gitignore file in mono/arch/arm64.
+
+commit 62b813772cfa4af873a278c39dd1f01dc6e50c2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 20:16:47 2014 +0200
+
+ [arm64] Add JIT support.
+
+commit 1d58ec09524d6f4ce37f39698e68fb45a3c0231b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 17:03:21 2014 +0200
+
+ [arm64] Add basic port infrastructure.
+
+commit 12741090edd2230bfd0fac498af3e304680380b4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 1 18:39:05 2014 +0000
+
+ [jit] Implement support for atomic intrinsics on arm.
+
+commit 21ca1bad7d0447bb5d420a58128e1c2733635efa
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Dec 11 11:13:14 2013 -0500
+
+ [arch]Add cvtsi2ss to amd64 codegen.
+
+commit 4a25d5fa1811be15c62979993cd1a37c2891d0a5
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Sat Nov 23 18:26:55 2013 +0100
+
+ Fix the encoding of x86_imul_reg_mem_imm.
+
+commit 43b05e3c36d05526f7a9f3f8767569d026e4f1c6
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Fri Nov 15 15:08:06 2013 +0100
+
+ Fix the `nop` opcode on some MIPS-based Loongson CPUs.
+
+ After much trouble building Mono in Debian/MIPS, @directhex
+ narrowed it down to this issue:
+
+ https://sourceware.org/ml/binutils/2009-11/msg00387.html
+
+ So since some of the 2E and 2F versions of the Loongson CPUs
+ break with a regular `sll zero, zero, 0` we need to issue an
+ `or at, at, 0`. This makes sure we don't randomly deadlock or
+ blow up when the CPU is under heavy load.
+
+ Yes, really.
+
+commit 2f56d471f089b8f514377ce501a0c1643652d639
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 24 23:41:39 2013 +0200
+
+ Merge some Nacl/ARM changes from https://github.com/igotti-google/mono/commit/65d8d68e8c81cf6adb1076de7a9425c84cab86a3.
+
+commit ab6a96ef346220433f9f7967b763a0453d9cbc66
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue May 14 18:27:32 2013 +0200
+
+ Enable hw division/remainder on mt in non-thumb mode as well.
+
+commit 78c1e65942210449d0d1c4957b42242ebc9bdb5a
+Author: Alex Rønne Petersen <alexrp@xamarin.com>
+Date: Tue May 14 03:10:43 2013 +0200
+
+ Kill support for the ancient FPA format on ARM.
+
+commit a42bc8f14a3393150fb6fbb772c2b0259267f5ae
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Thu Apr 25 10:01:14 2013 -0400
+
+ Add lazy rgctx support to s390x
+
+commit 92b3dc346aad94e7e6a91e7356adcebbb180c618
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 22 17:54:27 2013 +0200
+
+ Remove obsolete 32 bit s390 backend.
+
+commit 0d9d79945bfc7e791ed39e7519b8769a3c09fe28
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Thu Jan 31 12:48:49 2013 -0800
+
+ NaCl GC improvements
+
+ - inline managed code implementation
+ (add x86 test mem imm8 codegen macro for this as well)
+ - clean up libgc NaCl code
+ - centralize mono_nacl_gc into mini.c
+
+commit a2b380c30f8e12e508d9b761b9b049d17dff3617
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 1 20:27:07 2013 +0100
+
+ Remove the unmaintained and incomplete alpha backend.
+
+commit ddee8bb5125ad07f673a5f9a45ddc629dec8c126
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Feb 26 22:08:26 2013 +0100
+
+ Remove the unmaintained and incomplete hppa backend.
+
+commit 9c434db79ba98565a8dadcfbbe8737621a698589
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 9 17:23:38 2012 -0400
+
+ Use full path for includes as this was braking the cross compiler.
+
+commit 600580c96563f5702acee5a0307432e96731d837
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Oct 4 13:03:06 2012 +0200
+
+ Save fp registers in the ARM throw trampoline, ios has callee saved fp registers, and LLVM generates code which uses them.
+
+commit 0b64268e0a56e3f76063f0b679975be0daaf68b1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Oct 3 10:26:37 2012 +0200
+
+ Use AM_CPPFLAGS instead of INCLUDES in Makefile.am files, as the later is no longer supported, see http://lists.gnu.org/archive/html/automake/2012-08/msg00087.html.
+
+commit f2e43c392dde726d2f1008dfcc8515d34354e968
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 19 01:37:26 2012 +0000
+
+ Save/restore fp registers in MonoContext on ios. Fixes #1949.
+
+commit a841c76b86e38fc8e5db24f152b5fab2501ddf1a
+Author: Iain Lane <iain@orangesquash.org.uk>
+Date: Sun Apr 15 14:49:55 2012 +0100
+
+ Fix ARM printf format problems
+
+ When building with -Werror=format-security on ARM, mono fails to build
+ due to incorrect format strings in arm-dis.c
+
+commit 33426abe6bd7ad8eb37d2f214afe08a0a3d70a0b
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Mon Apr 2 13:30:43 2012 -0400
+
+ s390x-codegen.h - Define s390_SP and s390_BP
+ sgen-major-copy-object.h - Correct assertion test
+ sgen-os-posix.c - Prevent race condition between restarting and suspending a thread
+
+commit c565eab0f9d79f6009c3878eaa190529838b0204
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Mar 12 16:15:46 2012 -0400
+
+ Update some copyrights
+
+commit d711efe0d6403fa49697c304696843a789805112
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Dec 2 06:20:16 2011 +0000
+
+ Ongoing MIPS work. Fix mips_load () to be patchable, fix endianness issue in OP_MIPS_MFC1D, fix OP_JMP. make rcheck runs now.
+
+commit 32a164a381080aee3afa42ea33e31d89579519a4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 16 04:35:31 2011 -0500
+
+ Revert "Add support for hardfp abi on ARM."
+
+ This reverts commit e7055b45b9211fb20021997f7da0fa24992421f5.
+
+commit aaae806b8bd16a82937c9417689aeb82bea0b952
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Nov 9 10:25:48 2011 -0500
+
+ Update two days worth of copyrights, many more missing
+
+commit 96e5ba7724999828facefb30e0982d0be6931bda
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Nov 9 01:13:16 2011 +0100
+
+ Add support for hardfp abi on ARM.
+
+commit c6d53e16991eb2dcc3e4d99a008fdd899d2b78f2
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Fri Aug 5 17:02:45 2011 +0200
+
+ Fix up bugs in x86-codegen for NaCl.
+
+commit 8034d4b8f49485babcbffd12d3e09fd372c00ccb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 6 16:16:16 2011 +0200
+
+ Prefix ARM FPA codegen macros with 'FPA'.
+
+commit d2a95b8feb24584dd528b3deb0f5f1ec5d7766a3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 23 21:33:43 2011 +0200
+
+ Fix out-of-tree builds on arm.
+
+commit d093f6fff2bcaa4ccfc795354b151c7ca1a0c613
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Fri May 6 12:52:19 2011 -0400
+
+ Implement soft debugger for s390x and fix context macro for s390x
+
+commit 4c9723aa3efac03bc33deed252ebda71cbb1ae86
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 12:14:52 2011 +0100
+
+ Fix some warnings.
+
+commit b1a613aca13e03185d0ba49e46fd77fd8eb98fc9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 03:22:52 2011 +0100
+
+ Implement mono_memory_barrier () and OP_MEMORY_BARRIER for ARM.
+
+commit f81e3005a53a10c39f4ca8dd30a2a88719c7d005
+Author: Neale Ferguson <neale@sinenomine.net>
+Date: Sun Jan 16 23:40:23 2011 -0500
+
+ Cast result of s390x_emit16/32 to eliminate lots of warning messages
+ Check for wrapper-managed-to-native when assessing call parameters and have emit_prolog use native_size when processing those parameters
+ Signed-off-by: Neale Ferguson <neale@sinenomine.net>
+
+commit 92a55ae009739b5ec652676b8fdd615375c27fc0
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:52:46 2011 +0000
+
+ Implement mono.simd new conversion ops on amd64
+
+commit b7639e01d7603a1e34dd225edb5e99fd2181494b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 10 10:40:12 2011 +0100
+
+ Implement a few conversion operations.
+
+ Add conversion operations between 4f, 2d and 4i.
+ Implemented only on x86 for now.
+
+commit f0e5c2be6946491ba052c82794361ec0d33cb04c
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Jan 7 00:19:03 2011 +0000
+
+ AMD64 version of the new mono.simd ops
+
+commit 1aa6254fb828e043ea55d7d3e37b02812e2d9bdf
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 21:36:31 2011 +0100
+
+ Implement Shuffle for 64bits types.
+
+ * x86-codegen.h: Add macro and define to emit pshufpd.
+
+ * mini-ops.h: Add OP_SHUPD.
+
+ * cpu-x86.md:
+ * mini-x86.h: Implement x86 support.
+
+ * simd-intrinsics.c: Handle shuffle on 64bit types.
+
+ * VectorOperations.cs: Add new methods.
+
+commit c1fb94e7e72e58924dcebe8cdfcdbcbe1e65b644
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Jan 6 18:43:59 2011 +0100
+
+ Add SHUFPS and macro to emit it.
+
+commit 48f5efeb334eb4b6e867c65ae53e21b3c45fd771
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 19:35:45 2011 +0100
+
+ Put back a macro definition accidentally removed by the nacl changes.
+
+commit a7074ea55af096913e4bcc8e044be7601bcc55b5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 6 11:49:32 2011 +0100
+
+ Fix warnings introduced by the NACL merge.
+
+commit 4edb45273377cc0858dab7e12b19026467e796c5
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 16:03:45 2010 -0800
+
+ Merge mono/io-layer, mono/metadata, mono/arch/x86 and configure.in for Native Client
+
+commit cfdf246cd2ffd65bd25e09f1d66bb55d57bf8953
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Tue Dec 14 14:37:36 2010 -0800
+
+ Changes to mono/arch/amd64 for Native Client
+
+commit aa974c33a3cee416fc456053164835acbf81df70
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Sep 24 11:28:46 2010 -0300
+
+ Implement amd64 support for OP_CARDTABLE.
+
+ * amd64-codegen.h (amd64_alu_reg_membase_size): Add support
+ for RIP based addressing.
+
+ * cpu-amd64.md: Add card_table_wbarrier.
+
+ * mini-amd64.c (mono_arch_output_basic_block): Emit the
+ new OP.
+
+ * mini-amd64.c (mono_arch_emit_exceptions): Handle another
+ kind of patch-info - GC_CARD_TABLE_ADDR. This is required
+ because we can neither have 64bits immediates with amd64
+ nor 2 scratch registers with current regalloc.
+
+ * mini-amd64.h: Define MONO_ARCH_HAVE_CARD_TABLE_WBARRIER.
+
+commit 7981b77489eba9fafe98b764ae8c423143e55a25
+Author: Mark Mason <mmason@upwardaccess.com>
+Date: Wed Aug 18 23:39:36 2010 +0800
+
+ Simplify test for MIPS imm16 operands.
+
+ Code contributed under the MIT/X11 license.
+
+commit 881a8fe8dfebf42e0f50228319132001d121c983
+Author: Elijah Taylor <elijahtaylor@google.com>
+Date: Mon Aug 9 17:40:18 2010 +0200
+
+ Add hooks to the codegen macros to support NACL codegen.
+
+commit da52cebbb28392e8043a36e8c29f4ceb4f706741
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Sun Jul 25 20:09:25 2010 +0530
+
+ EOL handling
+
+ This set of .gitattributes was automatically generated from the list of files
+ that GIT tried to normalize when I enabled automatic EOL conversion.
+
+ With this set of attributes, we prevent automated EOL conversion on files that
+ we know will cause trouble down the road.
+
+commit 80806328ee52ed52783e005f044e8447d34efac5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 19 02:35:46 2010 +0000
+
+ 2010-05-19 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load_func): Fix ilp32 support.
+
+ svn path=/trunk/mono/; revision=157521
+
+commit bb66b04f8ca017660ae65afa4b86a33b32d48cdb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 8 04:41:44 2010 +0000
+
+ .gitignore
+
+ svn path=/trunk/mono/; revision=155025
+
+commit 2b562993a3dced62eb48aeedcf38f234b655e86f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 29 23:21:23 2010 +0000
+
+ 2010-03-30 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/*.sh: Remove bash dependency.
+
+ svn path=/trunk/mono/; revision=154407
+
+commit 977db7f5b92aa4e7b8909f6d2440f3347e548364
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Mar 23 20:00:46 2010 +0000
+
+ Primarily, add support for mono_arch_get_throw_corlib_exception and IMT
+ for s390x. Other s390x fixes to instruction sizes, parameter passing, and ARCH
+ settings.
+
+
+ svn path=/trunk/mono/; revision=154085
+
+commit 282ce11cd7691698334563b95ca4b49e6c32f900
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Nov 20 22:34:30 2009 +0000
+
+ removing PLATFORM_WIN32
+
+ svn path=/trunk/mono/; revision=146652
+
+commit 774d55350115d1c4f08dc2a9b015e9502d796cef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 10 00:58:49 2009 +0000
+
+ 2009-11-10 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the names of the LDMIA/STMIA macros, they don't actually
+ update the base register.
+
+ svn path=/trunk/mono/; revision=145786
+
+commit 568b4a7ab726e87c664a682193fa57c5521ed23c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 14 13:49:01 2009 +0000
+
+ 2009-08-14 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Add armv6 MOVW/MOVT.
+
+ svn path=/trunk/mono/; revision=139918
+
+commit c4d98f3131b6b7d0732050c2e0ac7bd05b6c27c2
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Tue Aug 4 00:31:14 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * mono/arch/amd64/amd64-codegen.h: Added missing code gen macro for single packed square root.
+ * mono/mini/basic-simd.cs: added test for packed double square root.
+ * mono/mini/cpu-amd64.md: added opcode info for packed double square root.
+ * mono/mini/cpu-x86.md: added opcode info for packed double square root.
+ * mono/mini/mini-ops.h: added IR opcode for packed double square root.
+ * mono/mini/mini-x86.c: added IR to native translation code for packed double square root.
+ * mono/mini/mini-amd64.c: removed todo for packed double square root.
+ * mono/mini/simd-intrinsics.c: added method to IR opcode conversion for
+ packed double square root.
+
+ svn path=/trunk/mono/; revision=139309
+
+commit fc5d2d293fe800d860e9af4fcd9b19f9be7d4e17
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 24 15:00:25 2009 +0000
+
+ Fri Jul 24 16:54:13 CEST 2009 Steven Munroe <munroesj@us.ibm.com>
+
+ This patch is contributed under the terms of the MIT/X11 license
+
+ * arch/ppc/ppc-codegen.h (ppc_ha): Define high adjusted
+ conversion to support combining addis for bits 32-47 with
+ signed load/store displacements for bits 48-63.
+ (ppc_fcfidx, ppc_fctidx, ppc_fctidzx): Share with PPC32.
+ These instructions are available to 32-bit programs on 64-bit
+ hardware and 32-bit both starting with PowerISA V2.01.
+ [__mono_ppc64__]: Define ppc_mftgpr and ppc_mffgpr for Power6
+ native mode.
+ [!__mono_ppc64__]: Define ppc_is_imm32 as constant true for
+ ppc32.
+
+
+ svn path=/trunk/mono/; revision=138635
+
+commit f44bc9e40cc840bf63bf782aa0338aae3e898f7f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 20 20:45:49 2009 +0000
+
+ 2009-07-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_sse_pminud_reg_reg): Fix the encoding
+ of this instruction.
+
+ svn path=/trunk/mono/; revision=138242
+
+commit 88ccf5c589b23d6e79ea5a588d3986693b09879a
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 13 21:58:58 2009 +0000
+
+ 2009-07-13 Zoltan Varga <vargaz@gmail.com>
+
+ * x86/x86-codegen.h: Applied patch from Marian Salaj <salo3@atlas.cz>.
+ Fix encoding of PMINSW and PMINSD. Fixes #521662.
+
+ svn path=/trunk/mono/; revision=137821
+
+commit 64d366eddf3b1c93bcaaff2190fa1cc2b01f7f03
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jul 10 22:35:07 2009 +0000
+
+ Contributed under the terms of the MIT/X11 license by
+ Jerry Maine <crashfourit@gail.com>.
+
+ * amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+ * amd64/amd64-codegen.h: Fix bugs in simd macros.
+
+ svn path=/trunk/mono/; revision=137736
+
+commit d7fa5cedae9e4859b340ee29e997dfd48b45ce6e
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:25:11 2009 +0000
+
+ Fix wrong date in my entry to ChangeLog files. Sorry! :((
+
+ svn path=/trunk/mono/; revision=136786
+
+commit 1c634ebda21ddf5392c9d8edd030323d1ad85962
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Wed Jun 24 21:19:29 2009 +0000
+
+ mini-amd64.c: Added code to convert simd IR to native amd64 sse.
+ amd64/amd64-codegen.h: Add macros for coding several specific sse opcodes.
+
+ svn path=/trunk/mono/; revision=136785
+
+commit bb994071dcc42ba150d88776fe70f8d35fc522a9
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jun 23 23:55:26 2009 +0000
+
+ Fix LCONV_TO_xx and ICONV_TO_xx. Fix leave_method dump of returned
+ structure. Fix formatting.
+ Correct instruction lengths.
+ Add new instructions.
+
+ svn path=/trunk/mono/; revision=136748
+
+commit f48a4f5a13745caf5350d6f190efb97ec6b605ef
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:25:02 2009 +0000
+
+ Fix a few uses of ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136607
+
+commit 4ecc9d712b82d78c853e574edc0345c85bfcd660
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:24:56 2009 +0000
+
+ Fix a few uses of ppc_load_reg/ppc_store_reg.
+
+ svn path=/trunk/mono/; revision=136606
+
+commit 40c668ecb1553ffb7b6575b439b3ff8420265cd8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jun 22 15:22:10 2009 +0000
+
+ 2009-06-22 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Rework the naming of the load/store macros,
+ ldr/str now handle register sized quantities, while ldptr/stptr handle
+ pointer sized quantities.
+
+ svn path=/trunk/mono/; revision=136604
+
+commit cf0e113f7dd91ff8b46e35047cc48c2e5ece925c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 18:47:03 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Fix the last change to avoid self-assignments inside
+ macros.
+
+ svn path=/trunk/mono/; revision=136548
+
+commit 3858973d0bd980206ea3725a2e74f2a336aa1aa1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jun 20 13:04:42 2009 +0000
+
+ 2009-06-20 Zoltan Varga <vargaz@gmail.com>
+
+ * ppc/ppc-codegen.h: Add ppc_ldr/ppc_str macros to store regsize quantities.
+ Handle little endian host platforms in ppc_emit32.
+
+ svn path=/trunk/mono/; revision=136539
+
+commit 9629536810d07a63b980a29912eaf3df7313fee9
+Author: Jerri Maine <crashfourit@gmail.com>
+Date: Fri Jun 12 17:33:11 2009 +0000
+
+ Add macros for coding two byte SIMD/SSE opcodes. Added comments to help tell the different types of SSE code gen macros apart.
+
+ svn path=/trunk/mono/; revision=136018
+
+commit 76cddabf0319c7be9fae2b6c532aafe6587fafbc
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Apr 22 23:59:10 2009 +0000
+
+ merge
+
+ svn path=/trunk/mono/; revision=132427
+
+commit 965b554666f2999b9e01dd731b1134af1cfcd5fa
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 6 15:09:57 2009 +0000
+
+ 2009-04-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add ARM_FSITOS/ARM_FSITOD.
+
+ svn path=/trunk/mono/; revision=131125
+
+commit 7b7235494cabe7c5a796fafd6297070f993b03a8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 22:37:35 2009 +0000
+
+ 2009-04-03 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Add macros for decoding the SIB byte.
+
+ svn path=/trunk/mono/; revision=130910
+
+commit 9f497af70ef5ed9244ffbe9a6263f7d077136148
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Apr 2 00:50:47 2009 +0000
+
+ 2009-04-02 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-vfp-codegen.h: Add missing VFP codegen macros.
+
+ svn path=/trunk/mono/; revision=130817
+
+commit 7c682141c5861685e5b0efdcc1f337083657cf9d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Mar 6 15:55:12 2009 +0000
+
+ 2009-03-06 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/tramp.c: Include a change from the debian patches. Avoid #include-ing
+ a file in the middle of a function.
+
+ svn path=/trunk/mono/; revision=128782
+
+commit a7f6dd7620d7c440216c0f156bcd969a28a592d4
+Author: Martin Baulig <martin@novell.com>
+Date: Sat Feb 28 14:36:50 2009 +0000
+
+ Create .gitignore's.
+
+ svn path=/trunk/mono/; revision=128265
+
+commit 22e6e9728faa11a87a7f6f0f0ff0f0f8ef754c03
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 27 06:21:52 2009 +0000
+
+ 2009-02-27 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/{arm_fpamacros.h, arm_vfpmacros.h}: Remove these files, they are
+ autogenerated.
+
+ svn path=/trunk/mono/; revision=128179
+
+commit c70f15fc12afeb73f19d4ff18cf11b7289d76c4f
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Feb 2 23:32:58 2009 +0000
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * ppc/ppc-codegen.h: Make operand order and case consistent
+ (assembler order) for ppc_load_reg_update, ppc_load_multiple_regs,
+ ppc_store_multiple_regs, ppc_lwz, ppc_lhz, ppc_lbz,
+ ppc_stw,ppc_sth, ppc_stb, ppc_stwu, ppc_lbzu, ppc_lfdu, ppc_lfsu,
+ ppc_lfsux, ppc_lfsx, ppc_lha, ppc_lhau, ppc_lhzu, ppc_lmw,
+ ppc_lwzu, ppc_stbu, ppc_stfdu, ppc_stfsu, ppc_sthu, ppc_stmw. Use
+ "i" or "ui" instead of "d" for immediated operands to immediate
+ arthimetic and logical instructions in macros ppc_addi, ppc_addis,
+ ppc_ori, ppc_addic, ppc_addicd, ppc_andid, ppc_andisd.
+ [__mono_ppc64__]: Make operand order and case consistent
+ (assembler order) for ppc_load_multiple_regs,
+ ppc_store_multiple_regs. Simplify the DS form and make them
+ consistent with D forms for ppc_load_reg, ppc_load_reg_update,
+ ppc_store_reg, ppc_store_reg_update. ppc_ld, ppc_lwa, ppc_ldu,
+ ppc_std, ppc_stdu. Define ppc_lwax and ppc_lwaux.
+
+ 2009-02-02 Mark Probst <mark.probst@gmail.com>
+
+ Contributed under the terms of the MIT/X11 license by Steven
+ Munroe <munroesj@us.ibm.com>.
+
+ * exceptions-ppc.c (restore_regs_from_context): Correct operand
+ order (offset then base reg) for ppc_load_multiple_regs.
+ (emit_save_saved_regs) Correct operand order for
+ ppc_store_multiple_regs.
+ (mono_arch_get_call_filter): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * mini-ppc.c (emit_memcpy): Fix operand order for
+ ppc_load_reg_update and ppc_store_reg_update.
+ (mono_arch_output_basic_block): Correct operand order for ppc_lha.
+ (mono_arch_emit_epilog): Correct operand order for
+ ppc_load_multiple_regs.
+
+ * tramp-ppc.c (mono_arch_create_trampoline_code): Correct operand
+ order for ppc_store_multiple_regs and ppc_load_multiple_regs.
+
+ svn path=/trunk/mono/; revision=125443
+
+commit f228d47d2afc549321cec800466e6bc1cde631bb
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Jan 19 19:47:54 2009 +0000
+
+ 2009-01-19 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add x86_movd_xreg_membase.
+
+ svn path=/trunk/mono/; revision=123825
+
+commit 792160756d6ef76711408f151838c3f5a5f8d83b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 19 19:46:04 2008 +0000
+
+ 2008-12-19 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Fixed the argument order for lwzu in
+ ppc_load_reg_update.
+
+ svn path=/trunk/mono/; revision=121883
+
+commit 344a06253c9c1bad287e160b9714b0a052e68a09
+Author: Mark Mason <glowingpurple@gmail.com>
+Date: Sat Dec 13 06:54:25 2008 +0000
+
+ 2008-12-12 Mark Mason <mmason@upwardaccess.com>
+
+ * mips/mips-codegen.h: Changes to support n32.
+
+ Contributed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=121488
+
+commit 2dcc1868b2e2e830a9fa84a445ee79a8f6ab38ba
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Wed Dec 10 09:33:57 2008 +0000
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Merged with mini-ppc64.c.
+
+ * mini-ppc.h: Define PPC_MINIMAL_PARAM_AREA_SIZE on all targets.
+
+ * Makefile.am: Use the same sources for PPC and PPC64.
+
+ * mini-ppc64.c: Removed.
+
+ 2008-12-10 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few new macros for the final PPC/PPC64
+ merge.
+
+ svn path=/trunk/mono/; revision=121203
+
+commit 77eff8936b5e423be2712ba66cd8baba0edd2795
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 20:57:02 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Some simple merges from mini-ppc64.c.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: ppc_load_func must use ppc_load_sequence.
+ Added ppc_compare_log.
+
+ svn path=/trunk/mono/; revision=120890
+
+commit dd397c9fd311f0411694ff1cc7904aec14f4551b
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Dec 5 16:42:24 2008 +0000
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc.c, mini-ppc.c, mini-ppc.h: Merged tramp-ppc.c with
+ tramp-ppc64.c.
+
+ * Makefile.am: Use tramp-ppc.c instead of tramp-ppc64.c.
+
+ * tramp-ppc64.c: Removed.
+
+ 2008-12-05 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Added ppc_load_func for PPC32. Added
+ ppc_load/store_multiple_regs and ppc_compare_reg_imm.
+
+ svn path=/trunk/mono/; revision=120852
+
+commit 7f226f68fb98684dafd132d90ca1a24635c33557
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Dec 2 16:03:45 2008 +0000
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * tramp-ppc64.c (mono_arch_create_rgctx_lazy_fetch_trampoline):
+ Fix trampoline size.
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: A few floating point
+ conversion opcodes are implemented natively instead via emulation.
+
+ 2008-12-02 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Opcodes for floating point conversions from
+ 64 bit integers.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=120492
+
+commit 742361c7bfc21faf8485d20d00cdfc58c04800f9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 28 19:06:34 2008 +0000
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h: Enable generalized IMT thunks and
+ make them work.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * object.c: Don't put function descriptors into generalized IMT
+ thunks.
+
+ 2008-11-28 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: #define for the maximum length of a load
+ sequence.
+
+ svn path=/trunk/mono/; revision=120248
+
+commit b45b096d6d4246f16d05e42838122f1d58f875f6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Fri Nov 21 00:21:53 2008 +0000
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, cpu-ppc64.md: Several fixes. Now
+ PPC64 passes basic-long.exe.
+
+ 2008-11-21 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Make ppc_is_[u]imm16() work with 64 bit
+ values.
+
+ svn path=/trunk/mono/; revision=119560
+
+commit dc227de13e4f1cee33c379401adbb90a225e680a
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:45:00 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_REG_MEMBASE and renamed MOVS to MOVSD.
+
+ svn path=/trunk/mono/; revision=119549
+
+commit 01e12b57e8773f9c65c64a91f956b0fa9335d095
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 23:44:44 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVS_ reg/membase variants.
+
+ * x86/x86-codegen.h: Add x86_sse_alu_pd_reg_membase_imm.
+
+ * x86/x86-codegen.h: Sort the x86_sse_alu_* macros decently.
+
+ svn path=/trunk/mono/; revision=119545
+
+commit 96ed3f7c4ea51c61ec3b5d0600c32fa003b8e4f7
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:36:13 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * decompose.c: Decompose carry and overflow add on PPC64 like on
+ other 64 bit archs. Don't decompose sub at all on PPC64.
+
+ * mini-ppc64.c, exceptions-ppc64.c, tramp-ppc64.c, cpu-ppc64.md:
+ Several fixes and new opcodes. Now PPC64 runs (but doesn't pass)
+ basic-long.exe.
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Use ppc_load_reg instead of ppc_ld in
+ ppc_load_func to fix the 2 bit shift.
+
+ svn path=/trunk/mono/; revision=119516
+
+commit 14651d4fa6b039131000aa5157ed99b7526f89b8
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Thu Nov 20 21:27:36 2008 +0000
+
+ 2008-11-20 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: 64 bit division opcodes.
+
+ Code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119515
+
+commit daa4af175e0f8b95888918dbf429c7d5f66d3c07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 20 14:28:51 2008 +0000
+
+ 2008-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/Makefile.am (libmonoarch_arm_la_SOURCES): Don't build tramp.c, it is only
+ used by the interpreter.
+
+ svn path=/trunk/mono/; revision=119444
+
+commit 3225dc9308230de9fbbca884c05e6b150a8e0333
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 20 14:12:04 2008 +0000
+
+ 2008-11-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PEXTR B/W/D.
+
+ svn path=/trunk/mono/; revision=119441
+
+commit 5c317c4676f911a0620b54e6668cf66a5c0dda31
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:58 2008 +0000
+
+ 2008-11-18 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add PINSR B/W/D.
+
+ svn path=/trunk/mono/; revision=119229
+
+commit b31b375fc1354cc835d183e7e251e602eeb038c5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 18 21:56:49 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Fix comment about the version of PCMPGTQ.
+
+ * x86/x86-codegen.h: Add movsd constant and x86_sse_alu_sd_membase_reg
+ macro.
+
+ svn path=/trunk/mono/; revision=119227
+
+commit dbebfad82832bf895561902dd527d2e4c158c2c9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 15:32:41 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: Macro for nop added.
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc64.c, mini-ppc64.h, tramp-ppc64.c, cpu-ppc64.md: Changes
+ for PPC64. An empty program runs now.
+
+ svn path=/trunk/mono/; revision=119162
+
+commit 406790f1df77c80b5b28bcac561e7b6c6cd1a3a6
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:25:11 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: PPC64 code generation macros.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119141
+
+commit 484dbedc8136e413a77ee11938d40e713cfefcfd
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Tue Nov 18 10:17:36 2008 +0000
+
+ 2008-11-18 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few fixes and additions.
+
+ Based on code submitted by andreas.faerber@web.de at
+ https://bugzilla.novell.com/show_bug.cgi?id=324134 under the
+ X11/MIT license.
+
+ svn path=/trunk/mono/; revision=119140
+
+commit 74b70bd5f7bc3b40a919c6c8b06c22facae8df6b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 17 17:00:22 2008 +0000
+
+ 2008-11-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add X86_SSE_MOVHPD_MEMBASE_REG constant
+ and x86_sse_alu_pd_membase_reg/x86_sse_alu_membase_reg macros.
+
+ svn path=/trunk/mono/; revision=119057
+
+commit 59483983e37bb55af19f4e98e3de2f1ad216989b
+Author: Andreas Färber <afaerber@mono-cvs.ximian.com>
+Date: Sat Nov 15 10:59:47 2008 +0000
+
+ 2008-11-15 Andreas Faerber <andreas.faerber@web.de>
+
+ * ppc/test.c: Add support for Mac OS X.
+
+ This commit is licensed under the MIT X11 license.
+
+ svn path=/trunk/mono/; revision=118924
+
+commit 6c930cb35aa08e10abba989d9cb8560b4636ba73
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Nov 13 22:51:27 2008 +0000
+
+ 2008-11-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Remove not used macro x86_pshufd_reg_reg.
+
+ svn path=/trunk/mono/; revision=118779
+
+commit bfe79f71f1352fbbfb696de3b0c093562b6fefb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Nov 4 20:17:31 2008 +0000
+
+ 2008-11-04 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add store nta.
+
+ svn path=/trunk/mono/; revision=117921
+
+commit 42f47d048391da1619aa26b70e54980c4c33e3f2
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Nov 3 14:41:44 2008 +0000
+
+ 2008-11-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add prefetch instruction
+ and x86_sse_alu_reg_membase macro.
+
+ svn path=/trunk/mono/; revision=117753
+
+commit eaf2804839ffb61912a8eeef7c3a58463aafcdd6
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 19:24:34 2008 +0000
+
+ 2008-10-28 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add long version of the packed integer
+ ops.
+
+ svn path=/trunk/mono/; revision=117292
+
+commit 3fffcb4ac5879f2655ee3b4b3bee093a9eaa5016
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 28 00:05:56 2008 +0000
+
+ 2008-10-27 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movddup.
+
+ svn path=/trunk/mono/; revision=117220
+
+commit bf9bec59fad96b9a7cb38921c26bb1c176fe40ce
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 21:58:17 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed pack with saturation.
+
+ svn path=/trunk/mono/; revision=116995
+
+commit 2ffed07a8205616ea4a1605338f08c8ad6c77432
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 13:36:53 2008 +0000
+
+ 2008-10-24 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed mul high.
+
+ svn path=/trunk/mono/; revision=116936
+
+commit 2b6070d8bbd583f6bb90e02f3961252ef0854da8
+Author: Gonzalo Paniagua Javier <gonzalo.mono@gmail.com>
+Date: Fri Oct 24 01:02:49 2008 +0000
+
+ remove temporary/generated files
+
+ svn path=/trunk/mono/; revision=116902
+
+commit 7a2889c2ce0cfbc193324b64764a02e42f5daee8
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 24 00:35:54 2008 +0000
+
+ 2008-10-23 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add signed packed max, min, add/sub with saturation
+ and compare greater.
+
+ svn path=/trunk/mono/; revision=116896
+
+commit 600a42f70b41a94712aac746e44f2bba885dfc1f
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 20 19:36:04 2008 +0000
+
+ 2008-10-20 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add multiply and store high.
+
+ svn path=/trunk/mono/; revision=116545
+
+commit 454b5617264c1bb64ff7296669db98a14cc58118
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 17 17:41:14 2008 +0000
+
+ 2008-10-17 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int shuffle.
+
+ svn path=/trunk/mono/; revision=116265
+
+commit 8336fe34234402529da0e46af634948d678ee649
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 16 23:22:27 2008 +0000
+
+ 2008-10-16 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int compare equals and
+ psabw.
+
+ svn path=/trunk/mono/; revision=116117
+
+commit 0a6e6df8d766d7ad1b21d6c234826293d1317979
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Wed Oct 15 20:52:54 2008 +0000
+
+ 2008-10-15 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add packed int max/min/avg/shuffle and extract mask.
+
+ svn path=/trunk/mono/; revision=115919
+
+commit ec2240eaee83b7c5ff444e0708a114458394d55b
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Tue Oct 14 15:02:05 2008 +0000
+
+ 2008-10-14 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add movsldup and movshdup.
+
+ svn path=/trunk/mono/; revision=115785
+
+commit 7ed9633867d31f5dd5fd971611f952574c005a87
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 22:13:15 2008 +0000
+
+ 2008-10-13 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add remaining FP sse1 ops.
+ Add sse ps encoding with imm operand.
+ Add remaining sse1 ops.
+
+ svn path=/trunk/mono/; revision=115699
+
+commit 18f1e82ca6ebaf0929f654a56ab9ddfadfacacb5
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Mon Oct 13 01:13:10 2008 +0000
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macro for sse41 ops.
+ Add defined for pack ops, dword shifts/mul/pack.
+
+ 2008-10-12 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * basic-simd.cs: Remove PackWithUnsignedSaturation tests as it turns out
+ that the packuswb/packusdw don't work with unsigned numbers for what
+ would be negative numbers in signed format.
+
+ * cpu-x86.md: Add doubleword forms of many ops and packing ones.
+ Fix the len of fconv_to_r8_x and xconv_r8_to_i4.
+
+ * mini-ops.h: Add doubleword forms of many ops and packing ones.
+
+ * mini-x86.c: Emit doubleword forms of many ops and packing ones.
+
+ * simd-intrinsics.c (SimdIntrinsc): Rename the flags field to simd_version.
+
+ * simd-intrinsics.c (vector4f_intrinsics): Use simd_version field for sse3 ops.
+
+ * simd-intrinsics.c (vector4u_intrinsics): Rename to vector4ui_intrinsics and
+ add more ops.
+
+ * simd-intrinsics.c (simd_version_name): New function, returns the name of the
+ version as the enum in mini.h.
+
+ * simd-intrinsics.c (emit_intrinsics): Instead of having a special emit mode
+ for sse3 ops, check the simd_version field if present. This way the code
+ works with all versions of sse.
+
+ svn path=/trunk/mono/; revision=115610
+
+commit 494ea4f86907f393c8f0ba660edb100a107a8c80
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Oct 11 05:26:06 2008 +0000
+
+ 2008-10-11 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Fix the 64 bit support.
+
+ svn path=/trunk/mono/; revision=115509
+
+commit ba0739c0dc1dd6713f6127160dcee501b105c300
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 10 21:55:37 2008 +0000
+
+ 2008-10-10 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Handle non 32-bit targets.
+
+ svn path=/trunk/mono/; revision=115494
+
+commit 5de452f7ff84e26bd22b86205a1cdb9fc207fe75
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Thu Oct 9 18:28:16 2008 +0000
+
+ 2008-10-09 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros for sse shift, pack, unpack,
+ saturated math and packed byte/word/dword math.
+
+ svn path=/trunk/mono/; revision=115367
+
+commit 922c5a03dc6cd66147b1c6bfeb8c1045176618da
+Author: Rodrigo Kumpera <kumpera@gmail.com>
+Date: Fri Oct 3 14:28:09 2008 +0000
+
+ 2008-10-03 Rodrigo Kumpera <rkumpera@novell.com>
+
+ * x86/x86-codegen.h: Add macros and enum for SSE instructions.
+
+ svn path=/trunk/mono/; revision=114751
+
+commit f2d756dab8d08c009df41d94eb21fdf427a8e01a
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sat Sep 27 13:02:48 2008 +0000
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h: A few typecasts to fix compiler warnings.
+
+ 2008-09-27 Mark Probst <mark.probst@gmail.com>
+
+ * mini-ppc.c: Compiler warning fixes.
+
+ svn path=/trunk/mono/; revision=114279
+
+commit 386d8b482a7e399e4e8d130dd0d2d2ab405068ae
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Sun Sep 7 10:25:11 2008 +0000
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * marshal.c (mono_type_native_stack_size): Treat
+ MONO_TYPE_TYPEDBYREF like MONO_TYPE_VALUETYPE.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * method-to-ir.c (mono_method_to_ir2): Disable tail calls for PPC
+ until they're implemented properly.
+
+ * exceptions-ppc.c: Use arch-independent exception-handling code
+ instead of custom one.
+
+ * exceptions-ppc.c, mini-ppc.c, mini-ppc.h: Bug fixes and changes
+ for Linear IR.
+
+ * tramp-ppc.c, mini-ppc.c: Fixed warnings.
+
+ * decompose.c, aot-runtime.c, aot-compiler.c: PPC code also
+ applies when __powerpc__ is defined.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * libtest.c: Darwin structure alignment also applies to PPC.
+
+ 2008-09-07 Mark Probst <mark.probst@gmail.com>
+
+ * ppc/ppc-codegen.h (ppc_load): Inserted cast to fix some
+ warnings.
+
+ svn path=/trunk/mono/; revision=112455
+
+commit 5c8178c1e6cf4d2370c865c6bc66995ca1174eb9
+Author: Mark Probst <mark.probst@gmail.com>
+Date: Mon Jun 16 09:37:01 2008 +0000
+
+ 2008-06-16 Mark Probst <mark.probst@gmail.com>
+
+ * amd64/amd64-codegen.h: Removed extraneous parenthesis in a macro
+ nobody uses.
+
+ svn path=/trunk/mono/; revision=105886
+
+commit ecbcbb317678440e62a13e16820f95f6ea2dff3d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jun 6 02:08:56 2008 +0000
+
+ 2008-06-06 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_padding_size): Rewrite this to use the
+ instructions recommended by the amd64 manual.
+
+ svn path=/trunk/mono/; revision=105134
+
+commit 0ded1416da01e39a6c4a33fc9798123d4021fe4d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Apr 19 14:18:56 2008 +0000
+
+ 2008-04-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_is_imm32): Use gint64 instead of glong because of
+ win64.
+
+ svn path=/trunk/mono/; revision=101210
+
+commit cb1954322f73b8d1b0a6836c5242b05538ed72dd
+Author: Jb Evain <jbevain@gmail.com>
+Date: Sun Apr 13 11:44:22 2008 +0000
+
+ last merge 100420:100549
+
+ svn path=/branches/jb/ml2/mono/; revision=100550
+
+commit a977d5e7585e338491944fc87b5e018891eedd93
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Wed Mar 12 17:08:32 2008 +0000
+
+ In .:
+ 2008-03-13 Geoff Norton <gnorton@novell.com>
+
+ * arch/arm/tramp.c: Dont compile this on PLATFORM_MACOSX
+
+
+ svn path=/trunk/mono/; revision=98063
+
+commit 8c6ca9f3fda169feccab289ecd181e06bcc8e133
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 18 18:25:24 2008 +0000
+
+ 2008-02-18 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_movsxd_reg_mem): New codegen macro.
+
+ svn path=/trunk/mono/; revision=96092
+
+commit 7a7cef000b9d59672b47c0fcdf75bd1fc00b8c78
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 14 14:21:56 2008 +0000
+
+ 2008-02-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_membase8_imm_size): New codegen macro.
+
+ svn path=/trunk/mono/; revision=95633
+
+commit 9cbc23b5ee9e4f2dca88f8418d11be97079c25a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Feb 8 14:28:06 2008 +0000
+
+ 2008-02-08 Zoltan Varga <vargaz@gmail.com>
+
+ * arm/arm-codegen.h: Fix the ordering of arguments for some load/store opcodes
+ so they are consistent.
+
+ svn path=/trunk/mono/; revision=95254
+
+commit b951542a9ead8a408c6560a0ffad28a5ade9670d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:12:46 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Or if INTERP_SUPPORTED is true.
+
+ svn path=/trunk/mono/; revision=93834
+
+commit 95aa5dc93dbfbcf10125032ecde0e5eabc969a98
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jan 24 20:10:14 2008 +0000
+
+ 2008-01-24 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Only set this on arm.
+
+ svn path=/trunk/mono/; revision=93833
+
+commit 11c84542edf07ed41b831c12058f9a0bdd83df93
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Nov 20 17:45:36 2007 +0000
+
+ 2007-11-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_alu_reg_imm_size): Prefer the smaller
+ instruction encoding.
+
+ svn path=/trunk/mono/; revision=90005
+
+commit b15fabef0c7798e4850432910d97e0249cd691fc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Nov 10 15:22:00 2007 +0000
+
+ 2007-11-03 David S. Miller <davem@davemloft.net>
+
+ * sparc/sparc-codegen.h (sparc_set32, sparc_set): A plain sethi
+ can be used if the constant value only has the top 22 bits set.
+
+ svn path=/trunk/mono/; revision=89409
+
+commit e22c1134d1553f6da21c1ef50ab4afb009d7c215
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Mon Nov 5 22:28:08 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * x86/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+
+ svn path=/trunk/mono/; revision=88931
+
+commit ad3b3601f5c113df825c3d2e09fb03b5aa4d1208
+Author: Geoff Norton <grompf@sublimeintervention.com>
+Date: Thu Nov 1 19:03:16 2007 +0000
+
+ 2007-11-01 Geoff Norton <gnorton@novell.com>
+
+ * ppc/Makefile.am: Only compile tramp.c if INTERP_SUPPORTED is true
+ Fixes the build on Leopard.
+
+ svn path=/trunk/mono/; revision=88673
+
+commit 8991f4a9503167171a0ad5e745d71ec4bd8b846c
+Author: Jonathan Chambers <joncham@gmail.com>
+Date: Fri Oct 26 14:41:54 2007 +0000
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * mini-amd64.c: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ * mini-amd64.h: Add %rdi and %rsi to MonoLMF structure
+ on Win64. Fix intrinsic, use _AddressOfReturnAddress
+ instead of non-existant _GetAddressOfReturnAddress.
+
+ * tramp-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Save/restore %rdi and %rsi in MonoLMF.
+
+ * exceptions-amd64.c: Use AMD64_ARG_REG# defines to access
+ param registers. Modify (throw_exception) signature to take
+ %rdi and %rsi on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+ 2007-10-26 Jonathan Chambers <joncham@gmail.com>
+
+ * amd64/amd64-codegen.h: Begin Win64 port. Use AMD64_ARG_REG#
+ defines to access param registers. Replace long usage with
+ gsize as sizeof(long) != sizeof(void*) on Win64.
+
+ Code is contributed under MIT/X11 license.
+
+
+ svn path=/trunk/mono/; revision=88258
+
+commit 118f4540a2da9cdb72debfb786a9930e93f2a10b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Oct 9 00:12:58 2007 +0000
+
+ 2007-10-09 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_jump_membase_size): Remove an unneccesary
+ rex prefix which trips up valgrind.
+
+ svn path=/trunk/mono/; revision=87140
+
+commit e43f3ebed2b5b54c47b5f8ce458788dce0ef97dc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 14 14:04:54 2007 +0000
+
+ 2007-07-14 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Remove some unused rex prefixes.
+
+ svn path=/trunk/mono/; revision=81979
+
+commit 25f0e1d2bd61097c008fa88e4a114884bb6fe0c9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 4 13:17:45 2007 +0000
+
+ Wed Jul 4 15:29:07 CEST 2007 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added minimal sse instructions currently
+ needed by the JIT.
+
+
+ svn path=/trunk/mono/; revision=81331
+
+commit e971b6ec5cf03043dc227759fced05d5786964d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 13 17:41:53 2007 +0000
+
+ 2007-06-13 Randolph Chung <tausq@debian.org>
+
+ * hppa/hppa-codegen.h: Update with more instructions.
+ * hppa/tramp.c: Disable for linux since we don't support the
+ interpreter.
+
+
+ svn path=/trunk/mono/; revision=79463
+
+commit 26169bb71cd30b373975373952fb11d7a26b0cca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 20 19:41:51 2007 +0000
+
+ 2007-05-20 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h (amd64_call_reg): Remove a got prefix which isn't needed.
+
+ svn path=/trunk/mono/; revision=77730
+
+commit a024b2405701bbee2003e46a0f9b0e2c0486033c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Apr 23 11:31:33 2007 +0000
+
+ 2007-04-23 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha port work from
+ Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=76103
+
+commit 5ca5ea86f1ff85953c28e0ba3b657268cd2cdfba
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Sun Apr 15 09:11:00 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+ * mini-s390.c: Correct checking for enum type in return value processing.
+
+ svn path=/trunk/mono/; revision=75718
+
+commit 9159abc7ec906d64a15eee8e02b9e5b3f2cce87d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Thu Apr 12 20:45:34 2007 +0000
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+
+ svn path=/trunk/mono/; revision=75663
+
+commit b7fd657ee94257eeec946fa9eb11b3f60e7e33e6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 12 16:07:56 2007 +0000
+
+ Mon Mar 12 17:07:32 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * amd64/amd64-codegen.h: removed some useless size rex prefixes.
+
+
+ svn path=/trunk/mono/; revision=74128
+
+commit 0ba3e4bdd057c7a0d25767f7647a00f07683b44c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 24 20:01:27 2007 +0000
+
+ Wed Jan 24 21:00:40 CET 2007 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: fixed encoding of short/byte load/store
+ instructions with negative immediate offsets.
+
+
+ svn path=/trunk/mono/; revision=71622
+
+commit 0251f000fba5c8f99bec6c33beae0c2aabe66451
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 23 17:11:29 2007 +0000
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+ svn path=/trunk/mono/; revision=71523
+
+commit 8e25ae408b9d1836130807d3f465023347051332
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Dec 22 22:51:15 2006 +0000
+
+ Patch from Sergey Tikhonov <tsv@solvo.ru>
+
+ Mono on Alpha updates:
+
+ - Code cleanup
+ - Some hacks to support debugger
+ - updates for "linears" optimization
+
+
+ svn path=/trunk/mono/; revision=69976
+
+commit edd2746e20c982e094abfd547afad74d8e7d2302
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 20 16:37:26 2006 +0000
+
+ Mon Nov 20 17:36:45 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/arm-codegen.h: added suppot for thumb interworking instructions.
+
+
+ svn path=/trunk/mono/; revision=68201
+
+commit b63503e7c4b5ebb8baafb5b58ec69395146db022
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 15 16:00:09 2006 +0000
+
+ Wed Nov 15 16:56:53 CET 2006 Paolo Molaro <lupus@ximian.com>
+
+ * mips/*: fixes by Mark E Mason <mark.e.mason@broadcom.com>.
+
+
+ svn path=/trunk/mono/; revision=67929
+
+commit 6f8d67005785ba86e81ac930325767d0b270a070
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 10 18:42:10 2006 +0000
+
+ Typo fixes.
+
+ svn path=/trunk/mono/; revision=67683
+
+commit f99322f3ea7b7be85ac63c87c664aafb7f5e17bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Oct 11 21:34:24 2006 +0000
+
+ 2006-10-11 Sergey Tikhonov <tsv@solvo.ru>
+
+ * atomic.h: Fix atomic decrement.
+
+ * mini/cpu-alpha.md: Use native long shift insts
+
+ * mono/mono/mini/tramp-alpha.c: Implemented
+ mono_arch_patch_delegate_trampoline method
+
+ * Started work on using global registers
+
+ * Use byte/word memory load/store insts if cpu supports it
+
+ * Code clean up
+
+
+
+
+ svn path=/trunk/mono/; revision=66573
+
+commit 538fd0794b9ef24f7c765891ed682fc947cf8e85
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 13:02:59 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h: More alpha updates from Sergey Tikhonov <tsv@solvo.ru>.
+
+ svn path=/trunk/mono/; revision=65305
+
+commit 0689ca5f72fa8cb03fb1b565a31c4e2b22774a64
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 12 11:10:42 2006 +0000
+
+ Tue Sep 12 13:09:56 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: VFP floating point format code generation support.
+
+
+ svn path=/trunk/mono/; revision=65295
+
+commit deacad246a936216f09a81b9881c6780de8dd406
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Sep 12 10:05:29 2006 +0000
+
+ 2006-09-12 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Add xmpy_l/u pseudo ops.
+
+ svn path=/trunk/mono/; revision=65289
+
+commit 207e90216277d1d1ee0e6cd37f183440c8c39a26
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:10:43 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * amd64/amd64-codegen.h: Fix amd64_mov_mem_reg.
+
+ svn path=/trunk/mono/; revision=62746
+
+commit 8f58fa13418008cb86a8ba450a894b23efc4574e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 19 12:09:09 2006 +0000
+
+ 2006-07-19 Zoltan Varga <vargaz@gmail.com>
+
+ * alpha/alpha-codegen.h alpha/test.c alpha/tramp.c: Applied patch from
+ Sergey Tikhonov <tsv@solvo.ru>. Updates to alpha support.
+
+ svn path=/trunk/mono/; revision=62745
+
+commit ef8021400f045f835fcf70baf5ba5880fe6eca93
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 15 15:00:59 2006 +0000
+
+ Thu Jun 15 16:59:36 CEST 2006 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: reduce noisy build warnings by
+ casting to the more commonly used unsigned char type
+ (from johannes@sipsolutions.net (Johannes Berg)).
+
+
+ svn path=/trunk/mono/; revision=61757
+
+commit de54a3e44b1214298b39386b49e1ca992176e2e4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 14 18:51:25 2006 +0000
+
+ 2006-05-14 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_fetchadd8_acq_hint_pred): Fix encoding of this
+ opcode.
+
+ svn path=/trunk/mono/; revision=60695
+
+commit 3b274ddc5c946640a4c0d6a7b2dee13cd2f5096d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Apr 21 14:51:24 2006 +0000
+
+ 2006-04-21 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Revert the last change as arm depends on the old
+ behaviour.
+
+ svn path=/trunk/mono/; revision=59758
+
+commit e830aadb2febf62051b8fc162884a909087cfe4e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Apr 12 19:02:09 2006 +0000
+
+ 2006-04-12 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_inst_i): New disassembly macro.
+
+ svn path=/trunk/mono/; revision=59415
+
+commit a65cd014e420a38b47e00f5c6f9ce590fc00987b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Apr 4 13:18:49 2006 +0000
+
+ 2006-04-04 Zoltan Varga <vargaz@gmail.com>
+
+ * Makefile.am (SUBDIRS): Avoid compiling subdirs needed by the
+ interpreter.
+
+ svn path=/trunk/mono/; revision=59009
+
+commit 0d566f3cb37ddf731fba6cfce9741e2224a13d77
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Mar 13 22:03:39 2006 +0000
+
+ * s390x-codegen.h: Fix immediate checks.
+
+ svn path=/trunk/mono/; revision=57914
+
+commit 15bc8b574c91bfaa40cd1d83374d0179148b5894
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jan 6 18:52:21 2006 +0000
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+ * mini-s390x.c, inssel-s390x.brg, cpu-s390x.md: Fix ATOMIC_I8
+ operations. Provide initial support for OP_ABS.
+
+ svn path=/trunk/mono/; revision=55158
+
+commit 1092c74e7a468b7761df92c2dc0dd2f2b49f21e6
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Jan 3 19:40:34 2006 +0000
+
+ * mono/io-layer/ChangeLog, mono/io-layer/atomic.h, mono/mini/mini-s390x.c,
+ mono/mini/mini-s390x.h, mono/mini/exceptions-s390x.c,
+ mono/mini/ChangeLog, mono/mini/s390-abi.cs, mono/mini/tramp-s390x.c,
+ mono/mini/inssel-s390x.brg, mono/mini/cpu-s390x.md, mono/mini/mini-codegen.c
+ mono/mini/basic-long.cs, mono/mini/Makefile.am, mono/arch/s390x/ChangeLog
+ mono/arch/s390x/s390x-codegen.h: 64-bit s390 support
+
+ svn path=/trunk/mono/; revision=55020
+
+commit 417b7fbe8f810e8fd62b2cb805164a3b80a536d6
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Dec 22 20:18:18 2005 +0000
+
+ 2005-12-22 Zoltan Varga <vargaz@gmail.com>
+
+ * sparc/sparc-codegen.h (sparc_membar): Add membar instruction.
+
+ svn path=/trunk/mono/; revision=54750
+
+commit 259b4749eaf68bfd6818ab38df91e37239c5dd45
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 13 19:12:20 2005 +0000
+
+ Continuing to bring s390 up to current levels
+
+ svn path=/trunk/mono/; revision=54312
+
+commit f5fc186c01c764705e303b3783bf06e507e54640
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Dec 13 13:57:51 2005 +0000
+
+ Avoid lvalue pointer casts.
+
+ svn path=/trunk/mono/; revision=54279
+
+commit ab97bc8d9e311f447d9f4a78e5a28ef6ff9b82ad
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 30 18:06:59 2005 +0000
+
+ 2005-10-30 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_m17): Fix a warning.
+
+ svn path=/trunk/mono/; revision=52399
+
+commit bb6893fc1e1854a8c9f848dfbfbc2dd00bde8735
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Oct 16 15:21:39 2005 +0000
+
+ 2005-10-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_CALLEE_SAVED_REGS): Add %rbp.
+
+ svn path=/trunk/mono/; revision=51764
+
+commit 0b2d13a625bfd03f8d24538ef48870daed540ee3
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 21:25:31 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51443
+
+commit 2bba48015b516fd326cd082eb85325aa5b7676bf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Oct 7 20:36:01 2005 +0000
+
+ Patch incorporated from SUSE, Neale reviewed it
+
+ svn path=/trunk/mono/; revision=51434
+
+commit 749c9989f64683d8363481304647924ec1d910af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 27 13:25:16 2005 +0000
+
+ Another compilation fix.
+
+ svn path=/trunk/mono/; revision=50857
+
+commit 64dbeb6e048aa9654800624a74e9c58065cf01ea
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Sep 27 09:09:41 2005 +0000
+
+ * arm/dpiops.sh, arm/fpaops.h: Output to stdout.
+ * arm/Makefile.am (arm_dpimacros.h, arm_fpamacros.h): Update. Fix
+ for srcdir != builddir.
+
+ svn path=/trunk/mono/; revision=50833
+
+commit 7c363c19299d3f85ee7de0eec2a83108ea98eff2
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 26 08:58:47 2005 +0000
+
+ Compilation fix.
+
+ svn path=/trunk/mono/; revision=50748
+
+commit 541c387c65579ca75abe8cdb9d0725c1e6d90df1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Sep 11 16:55:41 2005 +0000
+
+ 2005-09-11 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_unw_pop_frames): New unwind macro.
+
+ svn path=/trunk/mono/; revision=49910
+
+commit efbd8e41cf3337d59812a7cca48df3caee116b07
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 10 20:50:37 2005 +0000
+
+ 2005-09-10 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Remove 'manual' emitting of instructions.
+ Integrate emission of unwind directives into the assembly macros.
+
+ svn path=/trunk/mono/; revision=49875
+
+commit 8b07d9836f60fee4ff83a14ce110921be8ef8f2e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Sep 3 22:06:10 2005 +0000
+
+ 2005-09-04 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h (ia64_no_stop): New macro.
+
+ svn path=/trunk/mono/; revision=49399
+
+commit 4e89407a4a8dc38125a804df930515a31603cdca
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 27 14:33:09 2005 +0000
+
+ 2005-08-27 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/ia64-codegen.h: Fix some bugs.
+
+ * ia64/codegen.c: Update to work with latest ia64-codegen.h
+
+ svn path=/trunk/mono/; revision=48969
+
+commit 9a52b3ea85b1899c6cc23263eec6879841b3fd08
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Aug 26 13:34:24 2005 +0000
+
+ 2005-08-26 Zoltan Varga <vargaz@gmail.com>
+
+ * ia64/Makefile.am: Distribute ia64-codegen.h.
+
+ svn path=/trunk/mono/; revision=48891
+
+commit 16291812e22e9750bf101e297fc573ce35bab382
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:58:33 2005 +0000
+
+ Oops
+
+ svn path=/trunk/mono/; revision=48874
+
+commit d4b1ea47e0395555276e1a6c8ddfa3800692b6ea
+Author: Wade Berrier <wade@mono-cvs.ximian.com>
+Date: Fri Aug 26 06:48:41 2005 +0000
+
+ Include files for 'make dist'
+
+ svn path=/trunk/mono/; revision=48871
+
+commit cac0da0afb2a782de1db55a000a2125531e757fd
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 20 22:16:11 2005 +0000
+
+ 2005-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Improve ins scheduling and fix some bugs.
+
+ svn path=/trunk/mono/; revision=48614
+
+commit d151f0e0b203a78ca99cab91d9df89ffe7728880
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Aug 17 20:28:30 2005 +0000
+
+ 2005-08-17 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add dependency information for all instructions.
+
+ svn path=/trunk/mono/; revision=48476
+
+commit f1bce593b3504a82fc344d696eeedd91c39bcfee
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 4 18:51:34 2005 +0000
+
+ Uncommitted fixes.
+
+ svn path=/trunk/mono/; revision=48015
+
+commit 8348805e278d70da207455a0fe5cd470b00f3d8d
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 30 15:43:43 2005 +0000
+
+ 2005-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47855
+
+commit 0fb75c64cb1361cc81a4e47ca556a597b440d65a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jul 20 16:55:20 2005 +0000
+
+ Wed Jul 20 18:01:54 BST 2005 Paolo Molaro <lupus@ximian.com>
+
+ * arm/*: more codegen macros.
+
+
+ svn path=/trunk/mono/; revision=47473
+
+commit 2205bab6932e69490e48b9e11957041e938020ee
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Jul 18 20:33:37 2005 +0000
+
+ 2005-07-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_is_adds_imm): Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=47395
+
+commit 5a9a7537801ad68c0f8552e7e107994b793e93ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 22 22:00:43 2005 +0000
+
+ 2005-06-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add some new pseudo ops.
+
+ svn path=/trunk/mono/; revision=46401
+
+commit f51b94e34b1a887304ace96af27d51b4ec98ab4b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 19 20:18:07 2005 +0000
+
+ 2005-06-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Fix encoding of ia64_fclass.
+
+ svn path=/trunk/mono/; revision=46224
+
+commit 398224a9101808c8ca470b24366a506eeefec135
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Jun 12 20:41:05 2005 +0000
+
+ 2005-06-12 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45834
+
+commit 5a9f032072053d76af233b9906614ee491d6295c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Jun 9 20:22:08 2005 +0000
+
+ 2005-06-09 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45719
+
+commit 5f3ca7841b8aedd35f0c23781f2ac96f31ed501e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon May 30 14:09:48 2005 +0000
+
+ 2005-05-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/codegen.c: Fix it after latest changes.
+
+ svn path=/trunk/mono/; revision=45192
+
+commit d6844049f8659741b3afe9fa66136738107d28ac
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 14:24:56 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45159
+
+commit 4be6ea9e269927e9fbf06b0b73f53fef311f569f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 29 11:16:27 2005 +0000
+
+ 2005-05-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45157
+
+commit 7b483f1f48c7abc9d0c17a1fb34b30ddaa7058bb
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 18:02:41 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45147
+
+commit e360150e81b841b0644b5adc604f22f4b71e3987
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 28 17:08:04 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45145
+
+commit a781c3a65727b60386604adc6023f3f5a53b3e3e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 27 21:41:59 2005 +0000
+
+ 2005-05-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45127
+
+commit 20c2fc7ba73ffaf5506ab9bf487c3f519de5067f
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 26 17:16:50 2005 +0000
+
+ 2005-05-26 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=45064
+
+commit f37723d307325b539fc515774d3988e0c7ff7a14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 18:25:06 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44892
+
+commit 1d1c3f56953c0cb26c2e695b468ea1da368aaef0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 13:31:28 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44888
+
+commit e32454dae1a3679056fb4ac86ffc81defc3a5eb7
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun May 22 01:29:00 2005 +0000
+
+ 2005-05-22 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44883
+
+commit fee3f0247077513ba3254ddb410687a11c667b8c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri May 20 21:55:37 2005 +0000
+
+ 2005-05-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Ongoing IA64 work.
+
+ svn path=/trunk/mono/; revision=44855
+
+commit 1d94e7499dc18c3882f4aa16e977ceeaacddd466
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 23:02:39 2005 +0000
+
+ 2005-05-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h ia64/codegen.c: Ongoing ia64 work.
+
+ svn path=/trunk/mono/; revision=44722
+
+commit 3f053b86a49d8c41d47ca2ff771bda64ee5a5ddc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed May 18 18:55:54 2005 +0000
+
+ 2005-05-18 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h (ia64_codegen_init): Rename macro parameter.
+
+ svn path=/trunk/mono/; revision=44705
+
+commit 061e9ab4d483c98d6747caad5160bd30fbbf09ab
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 19:52:56 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * Makefile.am: Only compile libmonoarch if the interpreter is compiled.
+
+ svn path=/trunk/mono/; revision=44526
+
+commit 82a68f6e85fbc7aaa7832584b2f51953871f1390
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat May 14 17:35:42 2005 +0000
+
+ 2005-05-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * ia64/ia64-codegen.h: Add IA64 code generation macros.
+
+ * Makefile.am: Add ia64 subdir.
+
+ svn path=/trunk/mono/; revision=44523
+
+commit 800d43a2433ffc57d904687fdd2b746d5277cab5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu May 5 12:13:33 2005 +0000
+
+ 2005-05-05 Zoltan Varga <vargaz@freemail.hu>
+
+ * alpha/tramp.c: Applied patch from Jakub Bogusz <qboosh@pld-linux.org>.
+
+ svn path=/trunk/mono/; revision=44078
+
+commit 293459dd29bdd85542f499e0530c9504ced01604
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 28 21:09:11 2005 +0000
+
+ 2005-03-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Avoid emitting a rex in some places.
+
+ svn path=/trunk/mono/; revision=42316
+
+commit 140d5636edd892a388da877b7035f1809590e7ff
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 19:47:29 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_emit_rex): Emit a rex when accessing the
+ byte registers.
+
+ svn path=/trunk/mono/; revision=41848
+
+commit 242ec30220c85e3f69a1dd1d50469771c4ba7047
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 15 17:08:39 2005 +0000
+
+ 2005-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (AMD64_BYTE_REGS): Add AMD64_BYTE_REGS macro.
+
+ svn path=/trunk/mono/; revision=41842
+
+commit f7074904827b639bb500dcb92c481ec9f35a88a0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 14 15:17:54 2005 +0000
+
+ 2005-03-14 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add missing AMD64_XMM7.
+
+ svn path=/trunk/mono/; revision=41795
+
+commit d23ce2f6ba82d598af825e20b95cf7938ff5bc39
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 13 16:57:42 2005 +0000
+
+ 2005-03-13 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Remove some unneccesary REXes.
+
+ svn path=/trunk/mono/; revision=41765
+
+commit ad5014de38c4bde6ef12a04bbbcdf0303ac8acc1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 11:11:38 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsi2sd_reg_reg_size): Add _size
+ variants to some sse2 macros.
+
+ svn path=/trunk/mono/; revision=41557
+
+commit ee4c2805588b6d8291ac4349a520ca9c99050b59
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 8 09:28:19 2005 +0000
+
+ 2005-03-08 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_sse_cvtsd2si_reg_reg): Make this convert
+ to a 64 bit value.
+
+ svn path=/trunk/mono/; revision=41554
+
+commit 3c4a8677815d2ad4e0b47b809ca16b43f33e3f96
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Mar 6 21:25:22 2005 +0000
+
+ 2005-03-06 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add some SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=41491
+
+commit b175669d7abc2f7e83940305cf2cb1f7663569b0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 18:48:25 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add xadd instructions.
+
+ svn path=/trunk/mono/; revision=40956
+
+commit c7a5bc7b7055832a36dc63ba67ad7add33a95d06
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Feb 20 14:16:51 2005 +0000
+
+ 2005-02-20 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_jump_code_size): Do not emit a rex.
+
+ svn path=/trunk/mono/; revision=40934
+
+commit 2cf88a5c39f13e54cc5e5f95ab6021924077c1d8
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Feb 16 04:43:00 2005 +0000
+
+ remove .cvsignore, as this is not used anymore
+
+ svn path=/trunk/mono/; revision=40731
+
+commit 0c1ce771e696eabde58e35deb64c0b578be7a92d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jan 10 21:13:14 2005 +0000
+
+ - Fix atomic ops on s390
+ - Implement OP_ATOMIC_xxx operations on s390
+ - Standardize exception handling on s390 with other platforms
+ - Enable out of line bblock support
+ - Check vtable slot belongs to domain when building trampoline
+
+ svn path=/trunk/mono/; revision=38647
+
+commit 9f3d964963eac63f42db702fe80cbfa89e3a73b4
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Mon Dec 13 06:05:53 2004 +0000
+
+ remove svn:executable from *.cs *.c *.h
+
+ svn path=/trunk/mono/; revision=37682
+
+commit c7b8d172d479d75da8d183f9491e4651bbc5b4f7
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Tue Dec 7 04:18:03 2004 +0000
+
+ Fix atomic operations and add initial support for tls support.
+
+ svn path=/trunk/mono/; revision=37284
+
+commit c523c66bf11c9c05df3d77d42f8be9821ad558e5
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Nov 25 13:32:53 2004 +0000
+
+ 2004-11-25 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Updates to support the PIC changes.
+
+ svn path=/trunk/mono/; revision=36549
+
+commit da4b0970bffc8f281679bddf7371679910d0a23c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Nov 19 15:04:41 2004 +0000
+
+ Fri Nov 19 17:29:22 CET 2004 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: counter reg decrement branch values
+ (patch by Geoff Norton <gnorton@customerdna.com>).
+
+
+ svn path=/trunk/mono/; revision=36320
+
+commit 3e56873e56ee01f0195683a20bd44e0fd03db4ee
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Thu Nov 18 18:44:57 2004 +0000
+
+ 2004-11-16 Patrik Torstensson <patrik.torstensson@gmail.com>
+
+ * x86/x86-codegen.h: added opcodes for xadd instructions
+
+
+ svn path=/trunk/mono/; revision=36283
+
+commit 59c3726af38156a306a67c2dd6e755e8bdd0d89a
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Nov 17 03:05:28 2004 +0000
+
+ Add support for siginfo_t as a parameter to mono_arch_is_int_overflow. Support this
+ routine in s390.
+
+ svn path=/trunk/mono/; revision=36188
+
+commit 149905478e1af4189a0cd9cf3f0e294dbb2bccbc
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Nov 15 19:00:05 2004 +0000
+
+ 2004-11-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/x86-64-codegen.h: Get rid of this.
+
+ svn path=/trunk/mono/; revision=36145
+
+commit b982bf7e3e3e98afa37544b4a197d406f00b5e5a
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Nov 8 03:19:16 2004 +0000
+
+ fix
+
+ svn path=/trunk/mono/; revision=35803
+
+commit 4c5436f259d4a109ab352f2ec7b7891cdce76cc9
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Sep 6 15:07:37 2004 +0000
+
+ fix warning
+
+ svn path=/trunk/mono/; revision=33415
+
+commit 3a8f0a20bd939db788d3fd871b4c0ca37a4d0f96
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Wed Sep 1 01:04:04 2004 +0000
+
+ Support short forms of push imm
+
+ svn path=/trunk/mono/; revision=33128
+
+commit e11c33f0ae258eb62dd5fc2e4c6ce12952d25233
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 21:04:04 2004 +0000
+
+ 2004-08-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (amd64_imul_reg_membase_size): Fix REX
+ generation.
+
+ svn path=/trunk/mono/; revision=33003
+
+commit b0791969d5ddbcb465d86bcd42c86150f653a9a1
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 11:11:38 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: More SSE work.
+
+ svn path=/trunk/mono/; revision=32992
+
+commit 8ca359bb4894521802e1f2044ec55a9aada4c08e
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sun Aug 29 09:41:22 2004 +0000
+
+ 2004-08-29 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Add SSE2 instructions.
+
+ svn path=/trunk/mono/; revision=32991
+
+commit 39a59671ff853ab672d9db1c982093ee1c7cc1f8
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Aug 21 20:07:37 2004 +0000
+
+ 2004-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h (X86_IS_BYTE_REG): Redefine X86_IS_BYTE_REG
+ since under amd64, all 16 registers have a low part.
+
+ svn path=/trunk/mono/; revision=32632
+
+commit c6a18db1cda9d62eaba7e1095f34eb84e7c39a8b
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Aug 16 12:58:06 2004 +0000
+
+ 2004-08-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/x86-codegen.h: Add macros for accessing the mod/rm byte.
+
+ svn path=/trunk/mono/; revision=32365
+
+commit 7f2d7df98341055eaf370855c499508599770dec
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Sat Aug 14 18:28:26 2004 +0000
+
+ hush cvs
+
+ svn path=/trunk/mono/; revision=32344
+
+commit ee4209b85e88e6adfc07a057b41747607235805c
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Aug 6 16:28:23 2004 +0000
+
+ Support the MEMCPY(base, base) rule and add initial ARGLIST support
+
+ svn path=/trunk/mono/; revision=31985
+
+commit ee8712fd77bdd445d98c511a07f29b5136368201
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Aug 5 23:28:29 2004 +0000
+
+ Add s390x
+
+ svn path=/trunk/mono/; revision=31966
+
+commit 17467e9a25e9a1cf71c170fd85e042a5a11a0f05
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 20:43:11 2004 +0000
+
+ Further 64-bit S/390 updates
+
+ svn path=/trunk/mono/; revision=31898
+
+commit 4ad821169050e70979e71bbd5229557570059139
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Wed Aug 4 02:54:52 2004 +0000
+
+ S/390 64-bit support
+ tailc processing fix for S/390 32-bit
+
+ svn path=/trunk/mono/; revision=31840
+
+commit 5ebecc33aca9878d2071c8766e5741cd6434d676
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 23:11:29 2004 +0000
+
+ Add some s390 specific tests
+
+ svn path=/trunk/mono/; revision=31690
+
+commit 4e44c97a16962680e5009c97c0022e10ddbbad30
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jul 30 18:23:23 2004 +0000
+
+ Optimize code generation macros and standardize
+
+ svn path=/trunk/mono/; revision=31683
+
+commit 57ac232b2805d02a4e2b6322ed9532313337e56c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 30 16:01:49 2004 +0000
+
+ 2004-07-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31664
+
+commit 128d13d3973f07f5afba3ac7022bd9a4e7550626
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Thu Jul 29 17:10:53 2004 +0000
+
+ 2004-07-29 Ben Maurer <bmaurer@ximian.com>
+
+ * x86/x86-codegen.h: Add opcodes for cmp BYTE PTR [eax], imm
+
+ svn path=/trunk/mono/; revision=31622
+
+commit 77b5d5d9a5c508cef6a93be733818c446b9fe12c
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jul 28 20:14:03 2004 +0000
+
+ 2004-07-28 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31586
+
+commit a451b99d1a51fe3ffa7334ffbe6865f388e549c0
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Sat Jul 24 18:29:32 2004 +0000
+
+ 2004-07-24 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31431
+
+commit b58d4fba4fad9c9cd52604adf39ffe578e407b14
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 20:05:59 2004 +0000
+
+ 2004-07-23 Zoltan Varga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31426
+
+commit c7d11ced2179a38a406489b57f4a2f317fbe5da3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Jul 23 16:07:08 2004 +0000
+
+ 2004-07-23 zovarga <vargaz@freemail.hu>
+
+ * amd64/amd64-codegen.h: Ongoing JIT work.
+
+ svn path=/trunk/mono/; revision=31416
+
+commit f69c71790b01b62dd17d4479db005c3ef68e5e38
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 23:03:57 2004 +0000
+
+ Add mvcl instruction
+
+ svn path=/trunk/mono/; revision=31055
+
+commit c9c82671d87761dc9a06b78082402924cf8f540d
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Mon Jul 12 12:05:08 2004 +0000
+
+ Add instructions to support enhanced memory-to-memory operations.
+
+ svn path=/trunk/mono/; revision=31039
+
+commit 08a92e1c00c0a0cf3c446257b446939062605260
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Jun 30 15:04:48 2004 +0000
+
+ 2004-06-30 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add SPARC64 support.
+
+ svn path=/trunk/mono/; revision=30577
+
+commit d1881ea0cd90053526fa30405f4aeac90e06b485
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri Jun 18 20:03:01 2004 +0000
+
+ Fix broken ABI for stack parameters
+
+ svn path=/trunk/mono/; revision=29915
+
+commit 4e0bce5ca726ed3d2a33d6cfdc3b41b04fcb91f8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jun 17 16:25:19 2004 +0000
+
+ API cleanup fixes.
+
+ svn path=/trunk/mono/; revision=29787
+
+commit 1ac8bbc10c8f2cff9fe8aef20bee51612aa77f88
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 16 15:24:15 2004 +0000
+
+ Wed Jun 16 18:11:41 CEST 2004 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, *.c, *.h: more API cleanups.
+
+ svn path=/trunk/mono/; revision=29691
+
+commit cf789b0df2ab67298e712242ca201bd01d38c254
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 21 13:04:55 2004 +0000
+
+ More encoding fixes.
+
+ svn path=/trunk/mono/; revision=27820
+
+commit 47892f7ea09d90ff4385b3f9c3796d5ce80ee76d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon May 10 14:37:42 2004 +0000
+
+ Fix macros.
+
+ svn path=/trunk/mono/; revision=27028
+
+commit e85ff74df8db9dbeaa2f923b2d4b451fd84dcdc0
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Sat May 8 01:03:26 2004 +0000
+
+ 2004-05-07 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/ppc-codegen.h: remove GCC-ism in ppc_emit32
+
+ svn path=/trunk/mono/; revision=26957
+
+commit f4dcc4e46be455a7a289a969529ba4a1cd0bc3f3
+Author: Neale Ferguson <neale@mono-cvs.ximian.com>
+Date: Fri May 7 19:53:40 2004 +0000
+
+ Bring s390 JIT up to date.
+
+ svn path=/trunk/mono/; revision=26943
+
+commit e79a83571f6126771c5e997560dd7e15c540df3f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Fri Apr 30 03:47:45 2004 +0000
+
+ 2004-04-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * ppc/tramp.c: use sizeof (stackval), fix
+ delegate tramp frame layout for Apple
+
+ svn path=/trunk/mono/; revision=26383
+
+commit f05e6864576c8c9e827cf6affbaff770732628d4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 29 18:59:24 2004 +0000
+
+ Fix stmw opcode with signed offsets.
+
+ svn path=/trunk/mono/; revision=26328
+
+commit 92e3edf52f04c550767f3ae59c0f7fcefb46cbf8
+Author: Urs C. Muff <urs@mono-cvs.ximian.com>
+Date: Wed Apr 28 03:59:07 2004 +0000
+
+ cleanup
+
+ svn path=/trunk/mono/; revision=26114
+
+commit ab07311f8d1aeb258795fc72c5ed216f603db092
+Author: David Waite <david@alkaline-solutions.com>
+Date: Tue Apr 27 04:13:19 2004 +0000
+
+ 2004-04-26 David Waite <mass@akuma.org>
+
+ * unknown.c: modify to have content for defined platforms (to
+ avoid ISO C warning)
+
+ svn path=/trunk/mono/; revision=26036
+
+commit 9b84c8398a2558c61613ec50d3c3546627ac1e2d
+Author: Raja R Harinath <harinath@hurrynot.org>
+Date: Tue Apr 13 04:31:05 2004 +0000
+
+ ignores
+
+ svn path=/trunk/mono/; revision=25379
+
+commit 8adf42aeb550308e5a30e4308ad639fafa27e7e3
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:44:17 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h:
+ fix displacements in FP instrs
+
+ svn path=/trunk/mono/; revision=24755
+
+commit e82c4f6b16e7d3a7bdabe2df046b7ce17d91e716
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 30 01:18:11 2004 +0000
+
+ 2004-03-29 Bernie Solomon <bernard@ugsolutions.com>
+
+ * amd64/tramp.c:
+ * arm/tramp.c:
+ * hppa/tramp.c:
+ * ppc/tramp.c:
+ * s390/tramp.c:
+ * sparc/tramp.c:
+ * x86/tramp.c:
+ remove child from MonoInvocation as it isn't used.
+
+ svn path=/trunk/mono/; revision=24751
+
+commit 73296dcd03106668c5db4511948983bdadeaee2f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 23 22:01:55 2004 +0000
+
+ 2004-03-23 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/hppa-codegen.h: created
+
+ * hppa/tramp.c: changed style to be more like
+ other platforms.
+
+ * hppa/Makefile.am: add hppa-codegen.h
+
+ svn path=/trunk/mono/; revision=24504
+
+commit 6e46d909fa182adf4051e1a3c07bae63b93a2bc3
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Tue Mar 16 19:22:52 2004 +0000
+
+ 2004-03-16 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add v9 branches with prediction.
+
+ svn path=/trunk/mono/; revision=24153
+
+commit 49a337364d8413d2528fe97e68f16ef610bb3c6a
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:20:03 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=24136
+
+commit ce4b3b024bba2c8bd4d874a75ef7aa23e118abf7
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:35 2004 +0000
+
+ Rename, since stupid cvs gets confused with the dash in x86-64
+
+ svn path=/trunk/mono/; revision=24134
+
+commit 01dc8bdaddab8f9b1c939716c36d13a35cf2494d
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:16:07 2004 +0000
+
+ Added back
+
+ svn path=/trunk/mono/; revision=24133
+
+commit a97ef493bb1e42b3afa548e47e3e14afe028b3ef
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Mar 16 16:03:49 2004 +0000
+
+ Add x86-64
+
+ svn path=/trunk/mono/; revision=24131
+
+commit 25f79c5f1b26de4e7a413128d37731e1fcf09f14
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Mar 16 00:02:55 2004 +0000
+
+ 2004-03-15 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h: tweak sparc_mov_reg_reg
+ so Sun's dis command recognizes it.
+
+ svn path=/trunk/mono/; revision=24084
+
+commit 38dd3d4c585c7e9cc116b7dfb5e89356c4d02da2
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Mar 15 17:28:56 2004 +0000
+
+ 2004-03-15 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Add some v9 instructions.
+
+ svn path=/trunk/mono/; revision=24050
+
+commit 36d64a0bbf92ca51335ddcb87627a8194f601820
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Mar 11 18:23:26 2004 +0000
+
+ 2004-03-11 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Ongoing sparc work.
+
+ svn path=/trunk/mono/; revision=23926
+
+commit 7e46377b331225994068d848d9ff8ceaeb96d38a
+Author: Duncan Mak <duncan@mono-cvs.ximian.com>
+Date: Mon Mar 8 01:47:03 2004 +0000
+
+ 2004-03-07 Duncan Mak <duncan@ximian.com>
+
+ * Makefile.am: Removed the reference to 'x86-64'. This was the cause
+ of the missing Mono daily tarballs, 'make dist' wasn't working.
+
+ We do have an 'amd64' directory, but it doesn't make it in 'make
+ dist'.
+
+ svn path=/trunk/mono/; revision=23784
+
+commit 94156ea640c77f37c64332acd21adf4170ecb67b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sat Feb 28 15:53:18 2004 +0000
+
+ Add
+
+ svn path=/trunk/mono/; revision=23562
+
+commit c2492eb99fe2c3e148a8dc629cc283fafad7af7c
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:30 2004 +0000
+
+ Remove amd64
+
+ svn path=/trunk/mono/; revision=23540
+
+commit c58af24e593b96f1ccc7819ab100063aa4db3c54
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Fri Feb 27 17:03:17 2004 +0000
+
+ Add x86-64 directory
+
+ svn path=/trunk/mono/; revision=23539
+
+commit 7fd6186b66f081ef6c0fca7708ddf8a641a09eae
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Tue Feb 24 18:01:50 2004 +0000
+
+ Add amd64 support patch from Zalman Stern
+
+ svn path=/trunk/mono/; revision=23411
+
+commit 5d0cafa77c2cd95cb92a2990184bac64ec287016
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:14:37 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/sparc-codegen.h: Fix lots of opcodes + add new ones.
+
+ svn path=/trunk/mono/; revision=23248
+
+commit f9f3c20b070f92bcf6f85f5bd68a24c3434fe6c4
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Feb 19 14:13:23 2004 +0000
+
+ 2004-02-19 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Fix alignment of structures containing doubles.
+
+ svn path=/trunk/mono/; revision=23247
+
+commit bb16201aaa018434f551c2657d9e38f28dfe8904
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Mon Feb 2 15:56:15 2004 +0000
+
+ 2004-02-02 Zoltan Varga <vargaz@freemail.hu>
+
+ * sparc/tramp.c: Implement all floating point argument passing conventions in
+ Sparc V8. Also fix structure passing in V8.
+
+ svn path=/trunk/mono/; revision=22704
+
+commit 66607f84556593e2c3aa39bba418801193b6fddf
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Sun Jan 18 18:00:40 2004 +0000
+
+ Apply patches from Neale Ferguson for s390 support
+
+ svn path=/trunk/mono/; revision=22226
+
+commit 963e1b962894e9b434a2e80e63394bd0d34e68b8
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 3 21:42:37 2004 +0000
+
+ Codegen macros for mips.
+
+ svn path=/trunk/mono/; revision=21658
+
+commit 7e4789fdfc87f75e63612fe0aca1f66d76134ba9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Dec 3 16:48:07 2003 +0000
+
+ Typo fix.
+
+ svn path=/trunk/mono/; revision=20745
+
+commit 96651158bf48aa1c31b5f2e3ca4cbf904211b1dc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Nov 13 15:23:48 2003 +0000
+
+ Thu Nov 13 16:24:29 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: fixed most of the incorrect macros from ct.
+
+ svn path=/trunk/mono/; revision=19938
+
+commit ebebe8e4565897dfaad69911c88f4dda134d4b84
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Fri Oct 31 13:03:36 2003 +0000
+
+ 2003-10-31 Zoltan Varga <vargaz@freemail.hu>
+
+ * */tramp.c (mono_create_method_pointer): Rename to
+ mono_arch_create_method_pointer, move common code to a new function in
+ interp.c.
+
+ * */tramp.c (mono_create_trampoline): Rename to
+ mono_arch_create_trampoline for consistency.
+
+ svn path=/trunk/mono/; revision=19500
+
+commit c41c989929efaf77826634392c8ce9c54525809d
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Tue Oct 14 05:17:17 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * x86/tramp.c: restore EDX after memcpy call
+
+ svn path=/trunk/mono/; revision=19024
+
+commit e4f9a75ed58f5ca214a685041f2a538e2f40fe1f
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:56:37 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * Makefile.am: add hppa subdir
+
+ svn path=/trunk/mono/; revision=18999
+
+commit fa30eb232e53c9e39eec1bd44189e8ac29ba1644
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:48:11 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * hppa/tramp.c: add initial implementation - this is 64 bit only
+ hppa/Makefile.am hppa/.cvsignore: added
+
+ svn path=/trunk/mono/; revision=18996
+
+commit 0b0945abf1e873f6a8dfb527236d8cce2ce15574
+Author: Bernie Solomon <bernard@mono-cvs.ximian.com>
+Date: Mon Oct 13 22:38:25 2003 +0000
+
+ 2003-10-13 Bernie Solomon <bernard@ugsolutions.com>
+
+ * sparc/sparc-codegen.h sparc/tramp.c: add initial implementation
+ for V9 (64 bit), cover more 32 bit cases as well.
+
+ svn path=/trunk/mono/; revision=18995
+
+commit 6519bafeae686f3b32870a17dc1c84ae90ec95f9
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Wed Sep 3 08:10:57 2003 +0000
+
+ 2003-09-03 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17839
+
+commit 935c93eeaff3ad8ccee032ade3584a7f6ab8f4a1
+Author: Ben Maurer <benm@mono-cvs.ximian.com>
+Date: Mon Aug 25 13:38:19 2003 +0000
+
+ .cvsignore update
+
+ svn path=/trunk/mono/; revision=17581
+
+commit 0fed0582997210e2a0ac71a527dbd319a85aebcb
+Author: ct <ct@localhost>
+Date: Sun Aug 24 22:49:45 2003 +0000
+
+ completed the set of floating point ops
+
+ svn path=/trunk/mono/; revision=17564
+
+commit 3d0f6d935e3a9c180d0bbb14fc371d40e53b7872
+Author: Zoltan Varga <vargaz@gmail.com>
+Date: Thu Aug 21 15:23:31 2003 +0000
+
+ 2003-08-21 Zoltan Varga <vargaz@freemail.hu>
+
+ * x86/tramp.c: Fixes from Bernie Solomon (bernard@ugsolutions.com).
+
+ svn path=/trunk/mono/; revision=17470
+
+commit ed628ad0776db600fab8d5e4bcd6b563f5e808fd
+Author: ct <ct@localhost>
+Date: Tue Aug 19 03:04:34 2003 +0000
+
+ added more asm macros for floating point subtraction of single/double/quad
+
+ svn path=/trunk/mono/; revision=17394
+
+commit 6260d65a087be486df039c80eba92e44eb7a220d
+Author: ct <ct@localhost>
+Date: Tue Aug 19 02:53:23 2003 +0000
+
+ added floating point instructions for adding double, single, and quad numbers
+
+ svn path=/trunk/mono/; revision=17393
+
+commit c750ad8fea95e1fc81150e516ee26fbe79ab570d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Aug 7 14:13:05 2003 +0000
+
+ Fixed imm16 range check.
+
+ svn path=/trunk/mono/; revision=17157
+
+commit ebc38557433accd79fce2e38dff0505dfded5691
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jul 31 14:32:42 2003 +0000
+
+ Thu Jul 31 16:19:07 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in, etc.: portability fixes and support for
+ buidling outside the srcdir from Laurent Morichetti <l_m@pacbell.net>.
+
+ svn path=/trunk/mono/; revision=16937
+
+commit 6e851a87092161092c6e8f06f4de13fb45bc04a6
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jul 1 11:12:47 2003 +0000
+
+ Tue Jul 1 13:03:43 CEST 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/tramp.c: update from Laramie Leavitt (lar@leavitt.us).
+
+ svn path=/trunk/mono/; revision=15809
+
+commit c439e3df5cfa7c67d976258228cb9188a218c21d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jun 25 13:18:00 2003 +0000
+
+ FP control word enum.
+
+ svn path=/trunk/mono/; revision=15623
+
+commit 2ad34b0dc225bf0b2efeea63c2f9287a1dbad162
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jun 9 18:28:54 2003 +0000
+
+ Small updates.
+
+ svn path=/trunk/mono/; revision=15250
+
+commit df86960d595f0284a453fe3fc67687b707148dbf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed May 21 17:57:05 2003 +0000
+
+ Some fixes and more complete support.
+
+ svn path=/trunk/mono/; revision=14769
+
+commit 3af153bd53728da9da9215141b1341d60b447bd3
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed May 21 12:45:22 2003 +0000
+
+ 2003-05-21 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): dont allocate
+ I1 to registers because there is no simply way to sign extend 8bit
+ quantities in caller saved registers on x86.
+
+ * inssel-float.brg: set costs of some rules to 2 so
+ that monobure always select the arch. specific ones if supplied,
+ regardless of the order we pass the files to monoburg.
+
+ svn path=/trunk/mono/; revision=14757
+
+commit c4eeb3dfdd19546fb0712e5306d8d96a9a07580e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 20 10:44:31 2003 +0000
+
+ 2003-05-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * mini-x86.c (mono_arch_get_allocatable_int_vars): allocate 8/16
+ bit values to registers
+
+ svn path=/trunk/mono/; revision=14720
+
+commit 3a48ea89b161b268bb74f013cc36f6aec59e550b
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Thu May 1 23:42:01 2003 +0000
+
+ * tramp.c (mono_create_trampoline): tiny register allocation fix for reference types
+
+ svn path=/trunk/mono/; revision=14195
+
+commit 7595b109642f29ffe0cf8bb3e4411243b92a606f
+Author: Malte Hildingson <malte@mono-cvs.ximian.com>
+Date: Sun Apr 27 16:04:54 2003 +0000
+
+ * tramp.c (alloc_code_buff): posix memory protection.
+ (mono_create_trampoline): new string marshaling + minor fixes.
+ (mono_create_method_pointer): delegates fix.
+
+ svn path=/trunk/mono/; revision=14046
+
+commit dfe276d1e1d116b113a639eecbc14c3661af5462
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:50:16 2003 +0000
+
+ arm-WMMX.h: initial WirelessMMX support for ARM codegen;
+
+ svn path=/trunk/mono/; revision=14044
+
+commit 27eb0661916c7c65b43def99be92895c61f4d315
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Apr 27 14:47:57 2003 +0000
+
+ * ARM codegen update;
+
+ svn path=/trunk/mono/; revision=14043
+
+commit e1b54daadf68eef0608ac03bd6fe4dc374d78675
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Apr 27 11:40:11 2003 +0000
+
+ Make the debugging output off by default.
+
+ svn path=/trunk/mono/; revision=14039
+
+commit e679a120b848ea9e35e7c8a38ca3e03a386371c7
+Author: Patrik Torstensson <totte@mono-cvs.ximian.com>
+Date: Fri Feb 14 10:01:29 2003 +0000
+
+ 2003-02-14 Patrik Torstensson
+
+ * x86-codegen.h: Added fstsw op code for getting fp flags
+
+ svn path=/trunk/mono/; revision=11577
+
+commit f468e62377dfe3079f5b2bade1f43d239842e381
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Feb 1 10:02:52 2003 +0000
+
+ Sat Feb 1 10:59:31 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: update from Laramie.
+
+ svn path=/trunk/mono/; revision=11090
+
+commit cc3953655f65398b40e11fdcc97b1ae47bebfdc1
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 27 11:54:14 2003 +0000
+
+ Mon Jan 27 12:49:10 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * alpha/*: start of the port to the alpha architecture by
+ Laramie Leavitt (<lar@leavitt.us>).
+
+ svn path=/trunk/mono/; revision=10942
+
+commit 898dd64bddf69974ae9a22d6aa0ce9625fc9a5a0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jan 21 16:33:33 2003 +0000
+
+ Tue Jan 21 17:29:53 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/ppc-codegen.h: completed ppc native code generation by
+ Taylor Christopher P <ct@cs.clemson.edu>.
+
+ svn path=/trunk/mono/; revision=10778
+
+commit d2321af1b58b2fbb84c3b2cf3f6c7c7db0a787a4
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jan 17 20:17:58 2003 +0000
+
+ Fri Jan 17 21:14:18 CET 2003 Paolo Molaro <lupus@ximian.com>
+
+ * ppc/tramp.c: adapted to work for MacOSX (from a patch by
+ John Duncan).
+
+ svn path=/trunk/mono/; revision=10630
+
+commit 6d1b716753c1cc8a2f5c26338020941aa58ce9d7
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Jan 15 15:21:26 2003 +0000
+
+ Update to the API change of a while ago.
+
+ svn path=/trunk/mono/; revision=10545
+
+commit d4f44103ed442b9a6e221b58b68550c1de4dfa2b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Nov 11 19:13:08 2002 +0000
+
+ Some debugging stubs.
+
+ svn path=/trunk/mono/; revision=8922
+
+commit b669ce7ac5106466cc6d57e9163ca5d6d80611aa
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 24 19:27:13 2002 +0000
+
+ s390 support from Neale Ferguson <Neale.Ferguson@SoftwareAG-USA.com>.
+
+ svn path=/trunk/mono/; revision=8521
+
+commit 457b666522f839e5e94e5fdda2284255b26d79a2
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Oct 7 03:36:50 2002 +0000
+
+ Fix some minor trampoline nags. Now down to 15 failed tests. Delegate code
+ still broken, if anyone wants to help fix it.
+
+ svn path=/trunk/mono/; revision=8041
+
+commit b6d66c3ac8ae39c47b99dd8b8a7813e6f60c47e7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Oct 3 15:30:05 2002 +0000
+
+ Changes to tramp.c. Pass more tests.
+
+ svn path=/trunk/mono/; revision=7966
+
+commit e5d299dd18e820d33cf1d74e0e2de53e163cc07b
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 04:50:10 2002 +0000
+
+ Stupid off-by-one error fixed.
+
+ The problem was that I incremented gr as if we were on a PPC box. Sparc
+ doesn't need such "alignment" of the registers.
+
+ svn path=/trunk/mono/; revision=7800
+
+commit a9d8f44092c7c313efae893ff64306dc92985110
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Wed Sep 25 01:52:30 2002 +0000
+
+ arch/sparc/tramp.c: Fixed once again. Now works, mostly.
+ io-layer/atomic.h: It's sparc on gcc/solaris, and __sparc__ on gcc/linux.
+ had to add an #ifdef.
+
+ svn path=/trunk/mono/; revision=7798
+
+commit 0110bf4a5a435c5d60583887e0e0f28b7993a4cf
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Mon Sep 23 02:25:43 2002 +0000
+
+ Starting rewrite of trampolining for SPARC. It needed some cleanup.
+
+ It doesn't work at all now. GO PROGRESS!
+
+ svn path=/trunk/mono/; revision=7728
+
+commit fe7d0f819c55d76f0cb7a54ba66d4368d40385bd
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Thu Sep 19 18:30:56 2002 +0000
+
+ Beginning to add support for Solaris. Tested on Solaris 9.
+
+ Shared handles are still not working, will be addressed soon.
+
+ Trampoline code still broken, expect a rewrite.
+
+ svn path=/trunk/mono/; revision=7622
+
+commit 13eb9f4ebf45ffe17d555458cec8bbecefc71849
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 15:26:29 2002 +0000
+
+ retval value type fixed
+
+ svn path=/trunk/mono/; revision=7127
+
+commit 63315827a2ebc424954f4b8baf40497a5600ce7a
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 28 14:41:08 2002 +0000
+
+ fixed valuetypes marshaling in delegates
+
+ svn path=/trunk/mono/; revision=7126
+
+commit 82d4a3ff22ea8e8dfb9a3ec2be10657e7e25cd97
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Aug 24 23:54:12 2002 +0000
+
+ fixed struct marshaling, 108 tests pass now
+
+ svn path=/trunk/mono/; revision=7013
+
+commit b94511c33193dc728e039fa776bf3b9d5dad4e5b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed Aug 21 17:47:34 2002 +0000
+
+ fixed delegates
+
+ svn path=/trunk/mono/; revision=6862
+
+commit fafa1892b8b0315cab29de09f09f2aa5041b61a7
+Author: Mark Crichton <crichton@mono-cvs.ximian.com>
+Date: Tue Aug 20 15:03:07 2002 +0000
+
+ This nearly completes SPARC trampoline support for mint/mono. The delegate
+ code still needs some work.
+
+ There are bugs. Send crash reports, as well as .cs code and exe's to
+ crichton@gimp.org
+
+ Also, if anyone gets Bus Errors in the code, let me know as well, I've been
+ hunting down alignment bugs as well.
+
+ svn path=/trunk/mono/; revision=6812
+
+commit f8f8b65c484f48436941e4985cfb4b837cff4ceb
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 5 17:28:10 2002 +0000
+
+ Mon Aug 5 19:21:19 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix random memory read in mono_create_method_pointer.
+
+ svn path=/trunk/mono/; revision=6436
+
+commit dc11862f43a6240bcc35d2ef96fb04750c4bf930
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Aug 5 16:43:06 2002 +0000
+
+ x86-codegen.h: fixed bug in x86_memindex_emit, for basereg == EBP && disp == imm32;
+
+ svn path=/trunk/mono/; revision=6433
+
+commit 60179dd8c27bf3c080ca2c7db818c01a51c9d4b1
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Aug 5 09:53:43 2002 +0000
+
+ 2002-08-05 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): fixed stack_size bug
+
+ svn path=/trunk/mono/; revision=6408
+
+commit e13f4a98c6fe61ec768b0da9d8832814a313ed78
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:34:20 2002 +0000
+
+ more WIP
+
+ svn path=/trunk/mono/; revision=6363
+
+commit f73afba7e99de872e4e9d9dcf3c7c483632f6bc6
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 18:13:59 2002 +0000
+
+ more surgery
+
+ svn path=/trunk/mono/; revision=6360
+
+commit 347f6a854167fa5a26484b83736de86f5ffd8ea0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri Aug 2 17:55:44 2002 +0000
+
+ did quick surgery to update for Dietmar's new code
+
+ svn path=/trunk/mono/; revision=6359
+
+commit cc4396df6db395836340d26ad2f2d920f946729f
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Aug 2 07:13:54 2002 +0000
+
+ 2002-08-02 Dietmar Maurer <dietmar@ximian.com>
+
+ * marshal.c (mono_delegate_to_ftnptr): pass delegate->target
+ instead of the delegate itself as this pointer (bug #28383)
+
+ svn path=/trunk/mono/; revision=6348
+
+commit fbb833e1937ec3e3183bd1219e0f2391faa62718
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 14:17:18 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): also push the value type pointer for
+ methods returning value types.
+ (mono_create_method_pointer): support valuetype returns.
+
+ * interp.c (ves_pinvoke_method): do not call stackval_from_data if the result
+ is a value type.
+
+ svn path=/trunk/mono/; revision=6311
+
+commit 27a4251f2a6fd091ddc8084ad14a8808c136431d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Aug 1 06:40:11 2002 +0000
+
+ 2002-08-01 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (stackval_from_data): add pinvoke argument
+ (stackval_to_data): add pinvoke argument. We need consider the
+ fact that unmanages structures may have different sizes.
+
+ * x86/tramp.c (mono_create_method_pointer): allocate space for
+ value types.
+
+ svn path=/trunk/mono/; revision=6308
+
+commit 1be0ee94a17d2a4b7edb513d845d88ba5fed8285
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:53:19 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/tramp.c: (mono_create_method_pointer): return method->addr for pinvoke methods
+
+ * interp.c (ves_exec_method): bug fix - directly jump to handle_exception.
+
+ svn path=/trunk/mono/; revision=6280
+
+commit 87f9fd554284e9d2037c8757a4211cf710a85ac0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Jul 31 11:00:53 2002 +0000
+
+ 2002-07-31 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c: use the new marshaling code. better delegate/remoting
+ support.
+
+ * debug-helpers.c (mono_method_full_name): only print a number to
+ indicate wrapper type (so that the output is more readable in traces).
+
+ * x86/tramp.c: remove code to handle PInvoke because this is no
+ longer needed.
+
+ svn path=/trunk/mono/; revision=6278
+
+commit ebf4ad275e84a3887798ac765bdc1f0ed457cd5a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Jul 19 12:21:01 2002 +0000
+
+ Fri Jul 19 14:18:36 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix float loads. Simple delegate marshaling fix.
+
+ svn path=/trunk/mono/; revision=5909
+
+commit 2b677a332d7e811ca9cc75d271d069787f0495c1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jul 8 16:13:36 2002 +0000
+
+ 2002-07-08 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: marshaling for SZARRAY
+
+ svn path=/trunk/mono/; revision=5650
+
+commit ef9afb744f4679c465be380b4285928fff50db5e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sat Jul 6 01:41:14 2002 +0000
+
+ 2002-07-05 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: removed magic hack
+
+ svn path=/trunk/mono/; revision=5614
+
+commit 02476784232f22f91e347750c3fb8018d770d057
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Jun 18 04:38:23 2002 +0000
+
+ Tue Jun 18 10:21:56 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal simple arrays correctly.
+
+ svn path=/trunk/mono/; revision=5316
+
+commit 5ff6eebba3bc5e1662b84a34a276d6842e41ab87
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jun 1 08:08:34 2002 +0000
+
+ Kill warning.
+
+ svn path=/trunk/mono/; revision=5075
+
+commit 0c268fdddc804751bba57401c02b139368f7a01c
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 31 10:55:37 2002 +0000
+
+ Compilation fixes.
+
+ svn path=/trunk/mono/; revision=5054
+
+commit 9fe623bf5c85da9328f895680d8688987a94427e
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 30 11:04:53 2002 +0000
+
+ 2002-05-30 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg (reg): bug fix in LOCALLOC
+
+ * mono.c (main): new switch --nointrinsic to disable memcpy opt.
+
+ * x86.brg: added block copy/init optimizations from
+ Serge (serge@wildwestsoftware.com)
+
+ svn path=/trunk/mono/; revision=5025
+
+commit 1b8d1ed7ce3e489dcf53cc2369a3d6d482d5901d
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Tue May 28 12:23:00 2002 +0000
+
+ 2002-05-28 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: impl. CKFINITE
+
+ svn path=/trunk/mono/; revision=4988
+
+commit b0826d366f4f32c6ef772c0a9deef5a9b4157f0b
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon May 27 22:56:15 2002 +0000
+
+ Updated copyright headers to the standard template
+
+ svn path=/trunk/mono/; revision=4975
+
+commit 027755140cf39776018e520f7cd838e319fb9a34
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu May 23 07:44:00 2002 +0000
+
+ 2002-05-23 Dietmar Maurer <dietmar@ximian.com>
+
+ * delegate.c: move the thread pool to metadata/threadpool.c, code
+ cleanup.
+
+ * threadpool.[ch]: impl. a threadpool that can
+ be used by mint and mono.
+
+ svn path=/trunk/mono/; revision=4875
+
+commit be70e94a20c2c1864f829122085bce03f24cc4e8
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Wed May 15 14:19:24 2002 +0000
+
+ fixed delegates return values
+
+ svn path=/trunk/mono/; revision=4662
+
+commit 89d436d12d5746d04d9f27d9897853f846d0500e
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 19:00:42 2002 +0000
+
+ 2002-05-13 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (emit_save_parameters): fix I8 parameters
+
+ svn path=/trunk/mono/; revision=4601
+
+commit 8e8d0cf9ac1f4aa46da775bed8da214581345ddb
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon May 13 17:24:04 2002 +0000
+
+ introduced DEBUG, disabled by default
+
+ svn path=/trunk/mono/; revision=4599
+
+commit 8d20a830d50aaf3f30869283332d654472f16890
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Fri May 10 19:25:15 2002 +0000
+
+ * x86-codegen.h: renamed FP int macro for consistency (its arg is really a membase, not mem);
+
+ svn path=/trunk/mono/; revision=4500
+
+commit 9fb095d7866ee9963f11e3bd2dcc9b9930320ddc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Fri May 10 13:39:09 2002 +0000
+
+ updated for new strings
+
+ svn path=/trunk/mono/; revision=4484
+
+commit 5d0a1992c7fe0252457f6644198654d06ee7a19f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 10 07:24:08 2002 +0000
+
+ Fix checks in x86_patch().
+
+ svn path=/trunk/mono/; revision=4473
+
+commit 512203d918c6998f9652d23301b553c2bb205788
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:39:01 2002 +0000
+
+ Logged changes to x86-codegen.h
+
+ svn path=/trunk/mono/; revision=4344
+
+commit 9d1e2b5076d08bd02eb28ad8b3f2a27a42449250
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon May 6 16:33:54 2002 +0000
+
+ * x86-codegen.h: added missing shifts;
+ 8-bit ALU operations;
+ FPU ops with integer operand;
+ FIST (without pop);
+
+ svn path=/trunk/mono/; revision=4343
+
+commit 944736b70eb0689f094fe05c7184d36f7b7421bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri May 3 12:52:19 2002 +0000
+
+ Added some missing FP opcodes and made x86_patch() handle also the call opcode.
+
+ svn path=/trunk/mono/; revision=4252
+
+commit d8cf0bf0270efb923d7c6e80c4e5d547d1161740
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 29 12:14:39 2002 +0000
+
+ Removed mono_string_new_wrapper().
+
+ svn path=/trunk/mono/; revision=4151
+
+commit cc03dca33b721c5b46cba47ff7a7bb80b820be6d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Apr 22 07:32:11 2002 +0000
+
+ Mon Apr 22 12:57:31 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added loop instructions and made x86_patch fully
+ useful.
+
+ svn path=/trunk/mono/; revision=3950
+
+commit ab877e78de2c3ac01664dc13c13c2f231fca4c11
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Apr 20 14:32:46 2002 +0000
+
+ 2002-04-20 Dietmar Maurer <dietmar@ximian.com>
+
+ * interp.c (ves_exec_method): support internalcall String constructors
+
+ svn path=/trunk/mono/; revision=3925
+
+commit d4ccb473cf835fd07294b7da6a6d4da9e2022dcd
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Apr 10 12:34:16 2002 +0000
+
+ Forgot to commit.
+
+ svn path=/trunk/mono/; revision=3740
+
+commit 9116ce23467ea863a99b860849d867802c32187a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Apr 6 10:40:58 2002 +0000
+
+ Sat Apr 6 16:29:40 CEST 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix advancement of argument position on the stack.
+
+ svn path=/trunk/mono/; revision=3652
+
+commit bf0fa05ecc5f3537597c10704414544c50d3a0ed
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Apr 4 04:42:46 2002 +0000
+
+ Remove useless comments in rules.
+
+ svn path=/trunk/mono/; revision=3595
+
+commit 3f3f1e23c3cced2e37ec49361ee3236c524ed107
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Sat Mar 30 11:19:26 2002 +0000
+
+ fixed compiler warnings
+
+ svn path=/trunk/mono/; revision=3514
+
+commit 793cfcbae98d4847ff08aff44ffa27020260c317
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Mar 16 14:37:28 2002 +0000
+
+ Sat Mar 16 19:12:57 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: increase default allocated size for trampolines
+ and assert on overflow.
+
+ svn path=/trunk/mono/; revision=3143
+
+commit af361d9d30702937e3cd9412b987552f4652887a
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Mar 14 09:52:53 2002 +0000
+
+ 2002-03-14 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_create_native_wrapper): new code to generate
+ wrappers for calling native functions.
+
+ * icall.c (ves_icall_InternalInvoke): impl.
+
+ svn path=/trunk/mono/; revision=3103
+
+commit 670be867554bb6f1ed61a17649e21d0e25f66105
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Mar 11 11:24:33 2002 +0000
+
+ Mon Mar 11 16:14:29 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added x86_clear_reg() and changed
+ x86_mov_reg_imm() to not check for imm == 0.
+
+ svn path=/trunk/mono/; revision=3051
+
+commit 51d24bbb570af055b885dfe9f06e7717e4bb3b98
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Thu Feb 28 09:35:29 2002 +0000
+
+ impl. more CONV opcodes
+
+ svn path=/trunk/mono/; revision=2761
+
+commit d0370e0ab841b63f60170f3afcae9ee49e9faade
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Feb 28 07:43:49 2002 +0000
+
+ Thu Feb 28 12:34:21 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: start handling of more complex marshaling stuff.
+
+
+ Thu Feb 28 12:33:41 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * marshal.c, marshal.h: start of marshaling interface.
+
+ svn path=/trunk/mono/; revision=2759
+
+commit 29f73f5799fb9274a44c918cb4f63c606f765b96
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Wed Feb 27 09:12:27 2002 +0000
+
+ * Makefile.am: removed SCRIPT_SOURCES to fix automake issues.
+
+ svn path=/trunk/mono/; revision=2710
+
+commit a8b6a875977b2728019ea7cf2ea8dd432fe4469a
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:58:43 2002 +0000
+
+ * ChangeLog: ARM-related log entry.
+
+ svn path=/trunk/mono/; revision=2628
+
+commit f703ca24db3d380b37434e9f1cced6d0b45a5470
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Mon Feb 25 08:56:57 2002 +0000
+
+ * Makefile.am: added arm to DIST_SUBDIRS.
+
+ svn path=/trunk/mono/; revision=2627
+
+commit f107fb14e6c183972bec81e5727381f44c6a5333
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 20:46:13 2002 +0000
+
+ (mono_create_method_pointer): implements delegates with parameters
+ and return value
+
+ svn path=/trunk/mono/; revision=2618
+
+commit 2217d1a7da2572afd033b958454b9662c42022b9
+Author: Sergey Chaban <serge@mono-cvs.ximian.com>
+Date: Sun Feb 24 17:44:55 2002 +0000
+
+ * ARM support sources, initial check-in;
+
+ svn path=/trunk/mono/; revision=2615
+
+commit 56dde5e20e11f2d9d2a3522923a5a4729bed469f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 24 01:40:17 2002 +0000
+
+ 2002-02-24 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (mono_create_method_pointer): basic delegates
+ implementation, it works for simple delegates now and I am already
+ pretty close to have it working for every delegates, but I am
+ going to sleep and finish it tomorrow?
+
+ svn path=/trunk/mono/; revision=2611
+
+commit 0c4f3b00c8e831077c6ba1b28065e7be81bbff61
+Author: Jeffrey Stedfast <fejj@novell.com>
+Date: Fri Feb 22 19:43:09 2002 +0000
+
+ 2002-02-22 Jeffrey Stedfast <fejj@ximian.com>
+
+ * sparc/tramp.c (mono_create_trampoline): Much tinkering to get
+ the opcodes more correct. Still needs a lot of work.
+
+ svn path=/trunk/mono/; revision=2602
+
+commit 6bb3f7ead4ab8d574273f5bdacf32b29809ace80
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:57:29 2002 +0000
+
+ ops, fix return value passing
+
+ svn path=/trunk/mono/; revision=2526
+
+commit 725e90ef0e13752e357358ddef152a30beae174f
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 20:50:13 2002 +0000
+
+ added stack saving for most arguments
+
+ svn path=/trunk/mono/; revision=2523
+
+commit 5dbc4bd3639f2d012a1103ae1b0f911768e460ab
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Tue Feb 19 19:49:10 2002 +0000
+
+ 2002-02-19 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c (emit_save_parameters): don't start saving 64bit
+ values to
+ even registers
+
+ svn path=/trunk/mono/; revision=2519
+
+commit e756cc154586ebdd6f4bba8b730fca09611874cf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Feb 19 15:40:57 2002 +0000
+
+ Tue Feb 19 20:19:38 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: avoid pointer arithmetic (pointed out by Serge).
+
+
+ Tue Feb 19 20:20:15 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * dump.c: the prolog is before each arg in the custom attribute blob.
+
+ svn path=/trunk/mono/; revision=2513
+
+commit 1da21d342a98bedfc9295846080043d8946f4029
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 21:10:29 2002 +0000
+
+ la la la, ChangeLog entries
+
+ svn path=/trunk/mono/; revision=2463
+
+commit b7fa0baa6c15d3ee14a1b67dd5b56d21a931894b
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 20:02:39 2002 +0000
+
+ (mono_string_new_wrapper): new helper function, cut&pasted from
+ x86, modified to check for NULL text to avoid branching in
+ generated code
+ (calculate_sizes): updated for string retval changes
+ (emit_call_and_store_retval): updated for string retval
+
+ svn path=/trunk/mono/; revision=2461
+
+commit 2cee2566ae50aa32e13864135260e16fd21bfac1
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Sun Feb 17 19:41:12 2002 +0000
+
+ 2002-02-17 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: fixed minimal stack size, fixed string parameters,
+ fix byte and half word parameters
+
+ * ppc/ppc-codegen.h (ppc_mr): added lhz, lbz, sth
+
+ svn path=/trunk/mono/; revision=2460
+
+commit c6fd0cb7010239a29091a50aa5354e96f74bedf2
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 12:22:52 2002 +0000
+
+ added some docu
+
+ svn path=/trunk/mono/; revision=2372
+
+commit 6b6716c9eaa66549c9c1cf86934a54a830afc1b6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Feb 13 08:29:02 2002 +0000
+
+ pass the domain to mono_string_new
+
+ svn path=/trunk/mono/; revision=2365
+
+commit 0ffc7e417ee15973120c4f3a0cb0f2732c5c6633
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Mon Feb 11 22:48:46 2002 +0000
+
+ More
+
+ svn path=/trunk/mono/; revision=2341
+
+commit 6f7cdfa857058ee3662e1662190315c294188ae0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 13:49:06 2002 +0000
+
+ Mon Feb 11 18:40:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * sparc/*: sparc codegen header and some untested trampoline code.
+
+ svn path=/trunk/mono/; revision=2315
+
+commit d7a858a6ac5bc37435a157cf41eb63818905a7ea
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Feb 11 07:42:10 2002 +0000
+
+ Mon Feb 11 12:32:35 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix handling of multiple marshalled strings.
+ * x86/x86-codegen.h: some code to patch branch displacements.
+
+ svn path=/trunk/mono/; revision=2308
+
+commit dd029fa4245c99073ae6863dcb8e1560cc1eedc0
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Feb 1 12:04:34 2002 +0000
+
+ SHR/SHL impl.
+
+ svn path=/trunk/mono/; revision=2224
+
+commit 4a977a50d70eb75760d9555854845d32595c4093
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Feb 1 11:22:35 2002 +0000
+
+ Fri Feb 1 16:03:53 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: exception fixes. Use mono_method_pointer_get ()
+ to easy porting to other archs. Some support for overflow detection.
+
+ Fri Feb 1 16:03:00 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c, ppc/tramp.c: implement mono_method_pointer_get ().
+
+
+ Fri Feb 1 16:13:20 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: add asserts if we are ever going to scribble over memory.
+ * socket-io.c: not all systems have AF_IRDA defined.
+
+ svn path=/trunk/mono/; revision=2223
+
+commit 2d3dbc6213f3e12d1c7b332d80fec81384612bf8
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 24 01:00:53 2002 +0000
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c (mono_create_trampoline): Do not try to create a
+ mono_string_new if the return value from the PInvoke code is
+ NULL.
+
+ 2002-01-23 Miguel de Icaza <miguel@ximian.com>
+
+ * genwrapper.pl: Added wrappers for the mono_glob functions.
+
+ * glob.c: New file, with globing functions used by the Directory
+ code.
+
+ svn path=/trunk/mono/; revision=2139
+
+commit 5291c24b937d193ef9861c87421bab87e0fcc4da
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Mon Jan 21 20:06:20 2002 +0000
+
+ ppc changes
+
+ svn path=/trunk/mono/; revision=2090
+
+commit b5472227702fc528149111f0c4406c9dadb9a9e0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Jan 14 07:00:24 2002 +0000
+
+ Mon Jan 14 11:50:16 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added overflow condition code and some aliases
+ for the other ccs.
+
+ svn path=/trunk/mono/; revision=1968
+
+commit a18abcd00665e9bc660b90cf4c0bdf86456067af
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Jan 10 16:13:26 2002 +0000
+
+ Thu Jan 10 19:36:27 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: fix mono_class_from_mono_type () for szarray types.
+ Remove unused cache check in mono_class_from_type_spec().
+ * icall.c: *type_from_name () functions handle simple arrays and byref.
+ * reflection.c: handle byref and szarray types. Handle methods without
+ body (gets P/Invoke compilation working). Handle types and fields in
+ get_token ().
+ * reflection.h: add rank to MonoTypeInfo.
+
+
+ Thu Jan 10 20:59:59 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: add a flag to mono_create_trampoline ()
+ to handle runtime methods.
+
+
+ Thu Jan 10 21:01:08 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: mono_create_trampoline (): the runtime argument is
+ needed to handle correctly delegates, the previous change in handling
+ the string return type broke them.
+
+ svn path=/trunk/mono/; revision=1950
+
+commit 66990d65e3ac907fe24cc5411591759ce60472b0
+Author: Matt Kimball <mkimball@mono-cvs.ximian.com>
+Date: Wed Jan 9 01:49:12 2002 +0000
+
+ Tue Jan 8 22:38:41 MST 2002 Matt Kimball <matt@kimball.net>
+
+ * x86/tramp.c: handle strings returned from functions in external
+ libraries by converting to a Mono string object after the pinvoke'd
+ function returns
+
+ svn path=/trunk/mono/; revision=1923
+
+commit ba9f9e77bf38e3bb4b1a888d39c7b0aab8ae09bf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Jan 5 11:15:42 2002 +0000
+
+ Sat Jan 5 15:48:04 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * icall.c: hack to make IsSubType work for TypeBuilders.
+ * reflection.c: emit constructors before methods.
+ Retrieve param names in mono_param_get_objects().
+
+
+ Sat Jan 5 15:45:14 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: allow classname:method name in --debug argument.
+ Fix box opcode for valuetypes. Fix a few opcode to take a 16 bit
+ index instead of 32 (stloc, ldloc, starg, etc.).
+
+
+ Sat Jan 5 15:51:06 CET 2002 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle short integer return types.
+
+ svn path=/trunk/mono/; revision=1852
+
+commit 0635ffef0b38bcf88cd3320939c1d96bf8bb8c0e
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Jan 3 20:13:47 2002 +0000
+
+ Fix build for new automakes, seems to work
+
+ svn path=/trunk/mono/; revision=1795
+
+commit 054ebda213a85e3a8a1770ec5e63831e3a0f06ba
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 20 15:20:42 2001 +0000
+
+ Thu Dec 20 20:13:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix create_method_pointer() to pass the arguments
+ correctly and add check for overflow.
+
+ svn path=/trunk/mono/; revision=1656
+
+commit faaadc7132a2cdd8c13adf7fbb79d32461759493
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Dec 17 06:50:02 2001 +0000
+
+ 2001-12-16 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (arch_handle_exception): new code to handle
+ exceptions inside unmanaged code.
+
+ * x86.brg: impl. SAVE_LMF, RESTORE_LMF, pass implicit valuetype
+ address as first argument.
+
+ * x86.brg: pass exceptions on the stack
+
+ * jit.h (ISSTRUCT): new macro to check for real value types
+ (return false for enum types).
+
+ * unicode.c (_wapi_unicode_to_utf8): byteswap UTF16 strings before
+ passing them to iconv
+
+ * file-io.c: raise exceptions if handle is invalid.
+
+ svn path=/trunk/mono/; revision=1603
+
+commit 35430229b14448182d84a7f9348995019251fb28
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Dec 13 11:03:21 2001 +0000
+
+ Thu Dec 13 15:56:53 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: x86_mov_memindex_imm() added.
+
+ svn path=/trunk/mono/; revision=1565
+
+commit 813f9d5a9dcbe48c711bbb8bacc876e976ce0aea
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 21:23:53 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * ppc/tramp.c: use r12 which is volatile instead of non-volatile
+ r14 to avoid saving
+
+ svn path=/trunk/mono/; revision=1482
+
+commit 0a65eb2cf0b69f68849e7196b6e00133b3ecf3fc
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 20:19:00 2001 +0000
+
+ 2001-11-29 Radek Doulik <rodo@ximian.com>
+
+ * Makefile.am (libmonoarch_la_LIBADD): added ppc to DIST_SUBDIRS
+ generate libmonoarch for ppc
+
+ svn path=/trunk/mono/; revision=1478
+
+commit c4f49a88d52479062bd8b95669cb90c1b86242d0
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:48 2001 +0000
+
+ added test
+
+ svn path=/trunk/mono/; revision=1477
+
+commit 2c1c4889b99aaf4be0b894ea24b4d92201cb282d
+Author: Radek Doulik <rodo@mono-cvs.ximian.com>
+Date: Thu Nov 29 19:32:19 2001 +0000
+
+ added files for initial ppc support
+
+ svn path=/trunk/mono/; revision=1476
+
+commit 719926a4c59c399767f10b9567859300a768b05a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Nov 27 10:30:39 2001 +0000
+
+ Tue Nov 27 15:24:07 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: x86_lea_memindex() added.
+
+ svn path=/trunk/mono/; revision=1447
+
+commit c4a26e54cfa29ea5279d1964ef4ea7f6176c0357
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Nov 19 06:52:53 2001 +0000
+
+ Mon Nov 19 11:37:14 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: add mono_install_trampoline() so that the runtime
+ can register a function to create a trampoline: removes the ugly
+ requirement that a runtime needed to export arch_create_jit_trampoline.
+ * object.h, object.c: added mono_install_handler() so that the runtime
+ can install an handler for exceptions generated in C code (with
+ mono_raise_exception()). Added C struct for System.Delegate.
+ * pedump.c: removed arch_create_jit_trampoline.
+ * reflection.c: some cleanups to allow registering user strings and
+ later getting a token for methodrefs and fieldrefs before the assembly
+ is built.
+ * row-indexes.h: updates and fixes from the new ECMA specs.
+
+
+ Mon Nov 19 11:36:22 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * jit.c: use mono_install_trampoline (), instead of exporting
+ a function to a lower-level library.
+
+
+ Mon Nov 19 11:33:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: start adding support for handling exceptions across
+ managed/unmanaged boundaries. Cleanup Delegate method invocation.
+ Pass the correct target object in Delegate::Invoke and use the correct
+ 'this' pointer in ldvirtftn (bugs pointed out by Dietmar).
+
+ Mon Nov 19 11:32:28 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * main.c: remove arch_create_jit_trampoline().
+
+ svn path=/trunk/mono/; revision=1380
+
+commit af643d34335bfdc90a7455f99847e954456bb07d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Nov 14 15:18:56 2001 +0000
+
+ Wed Nov 14 19:21:26 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean as a return value.
+ * x86/x86-codegen.c: x86_widen_memindex() added.
+
+
+ Wed Nov 14 19:23:00 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: move the stack frame dumping code to a function so it can
+ be called from the debugger. Fix virtual method lookup for interfaces.
+ Throw exceptions instead of aborting in more places.
+ Print also the message in an exception. Updates for field renames in
+ corlib.
+
+
+ Wed Nov 14 19:26:06 CET 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.h, class.c: add a max_interface_id to MonoClass.
+ * icall.c: rename my_mono_new_object() to my_mono_new_mono_type()
+ since it's used to do that. Added mono_type_type_from_obj().
+ Make GetType() return NULL instead of segfaulting if the type was not
+ found. Handle simple arrays in assQualifiedName.
+ * object.h: add a struct to represent an Exception.
+ * reflection.c: output call convention in method signature.
+ Add code to support P/Invoke methods and fixed offsets for fields.
+
+ svn path=/trunk/mono/; revision=1352
+
+commit 041ab742894fbd6d90e2ffb3c6fddb60a869e952
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Nov 9 13:40:43 2001 +0000
+
+ 2001-11-09 Dietmar Maurer <dietmar@ximian.com>
+
+ * testjit.c (mono_analyze_stack): new BOX impl.
+
+ * x86.brg: implemented INITOBJ
+
+ * testjit.c (mono_analyze_stack): finished array support
+ (mono_analyze_stack): reimplemented DUP instruction
+
+ svn path=/trunk/mono/; revision=1308
+
+commit bff8e602354a8d32dfaed336600b5f648af06e70
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Thu Nov 8 21:38:32 2001 +0000
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * x86/tramp.c: Include stdlib to kill warning.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * main.c (dis_property_methods): Added missing colon which avoided
+ setting loc.t
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * interp.c: Include stdlib to kill warning.
+ (check_corlib): Adjust format encodings to remove warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * reflection.c (build_compressed_metadata): Eliminates warnings
+ and uses 64-bit clean code.
+
+ * metadata.c (mono_type_hash): Change signature to eliminate warnings.
+ (mono_type_equal): Change signature to eliminate warnings.
+
+ 2001-11-07 Miguel de Icaza <miguel@ximian.com>
+
+ * monoburg.y: Include string.h, stdlib.h to kill warnings.
+
+ * sample.brg: Include string.h to remove warnings.
+
+ svn path=/trunk/mono/; revision=1298
+
+commit 306ec85b780f5f9c99ffaf19f51baa6548a298a6
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Nov 7 06:33:48 2001 +0000
+
+ 2001-11-07 Dietmar Maurer <dietmar@ximian.com>
+
+ * emit-x86.c (enter_method): print out all method arguments
+ (x86_magic_trampoline): impl.
+ (arch_create_simple_jit_trampoline): we use different trampolines
+ for static methods (no need to write the address back into to
+ vtable).
+
+ svn path=/trunk/mono/; revision=1278
+
+commit 689da148c801d119d0d2722ef74a497e95c5f1b3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 22 09:24:31 2001 +0000
+
+ Mon Oct 22 15:20:14 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle boolean, u1 and i1 as return values.
+
+ svn path=/trunk/mono/; revision=1192
+
+commit f6b50c3852378ca35cef63056ddec70585b3ac32
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Oct 10 10:11:17 2001 +0000
+
+ Wed Oct 10 16:07:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.c: added x86_set_{reg,mem,membase}.
+
+ svn path=/trunk/mono/; revision=1133
+
+commit 27043fee95be8bec691045d7ab39b1be553550e9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 8 14:33:48 2001 +0000
+
+ Mon Oct 8 20:27:50 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: define NO_UNALIGNED_ACCESS for platforms that
+ can't read on unaligned boundaries
+
+
+ Mon Oct 8 16:12:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: use MonoArrayType to describe the shape of an array.
+ Guard against calling bsearch with a NULL pointer (pointed out by Laurent Rioux, smoux).
+ * image.c: endian fixes by Laurent Rioux.
+ * object.h, object.c: rename MonoStringObject to MonoString and
+ MonoArrayObject to MonoArray. Change some function names to conform to
+ the style mono_<object>_<action>. mono_string_new_utf16 () takes a
+ guint16* as first argument, so don't use char*.
+ Provide macros to do the interesting things on arrays in a portable way.
+ * threads-pthread.c: updates for the API changes and #include <sched.h>
+ (required for sched_yield()).
+ * icall.c: updates for the API changes above.
+ * Makefile.am, mono-endian.c. mono-endian.h: include unaligned read routines for
+ platforms that need them.
+
+
+ Mon Oct 8 16:13:55 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * get.c, get.h: MonoArray changed in MonoArrayType.
+ * main.c: guard against calling bsearch with a NULL pointer
+ (pointed out by Laurent Rioux, smoux).
+
+
+ Mon Oct 8 16:13:07 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: remove mono_get_ansi_string () and use
+ mono_string_to_utf8 () instead.
+
+
+ Mon Oct 8 16:14:40 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: use the accessors provided in object.h to deal with
+ MonoArrays. Updates for API renames in metadata. Throw exception
+ in ldelema if index is out of bounds.
+
+ svn path=/trunk/mono/; revision=1122
+
+commit 4ff31b89c4d3458dc378cd2e915ed08281a21a8b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Oct 4 13:32:23 2001 +0000
+
+ Thu Oct 4 19:10:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c: MonoTypes stored in MonoClass are stored as
+ fundamental MonoTypes when the class represents a
+ fundamental type (System.Int32, ...).
+ The TypeHandle return by ldtoken is a MonoType*.
+ * icall.c: ves_icall_get_data_chunk () write out all the
+ PE/COFF stuff. Implement ves_icall_define_method (),
+ ves_icall_set_method_body (), ves_icall_type_from_handle ().
+ * image.c: properly skip unknown streams.
+ * loader.h, loader.c: add type_class to mono_defaults.
+ * metadata.c, metadata.h: export compute_size () as
+ mono_metadata_compute_size () with a better interface.
+ Typo and C&P fixes.
+ * pedump.c: don't try to print the entry point RVA if there is no entry point.
+ * reflection.c, reflection.h: many cleanups, fixes, output method
+ signatures and headers, typedef and typeref info, compress the metadata
+ tables, output all the heap streams, cli header etc.
+ * row-indexes.h: typo fixes.
+
+
+ Thu Oct 4 19:09:13 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: allow marshalling valuetypes if they are
+ 4 bytes long.
+
+
+ Thu Oct 4 19:05:56 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * dis-cil.c: fix printing of exception stuff.
+ * dump.c: display some more info in the typedef table dump.
+ * main.c: typo fix and method list fix.
+
+ svn path=/trunk/mono/; revision=1071
+
+commit 7328e9088acbd2609dff8d07b841c3fafd894d25
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Oct 1 13:07:53 2001 +0000
+
+ Mon Oct 1 18:48:27 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: fix thinko (s/SUB/ADD/) in stack adjustment
+ and avoid a couple of unnecessary instructions.
+
+ svn path=/trunk/mono/; revision=1042
+
+commit 1fa26f9aa718559d3090d1c1275bf04d574368f0
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 28 13:49:47 2001 +0000
+
+ Fri Sep 28 19:26:30 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c: fix type comparison for arrays.
+ * loader.h, loader.c: half-assed fix to get more tests work in cygwin.
+ Added a couple of new classes to monodefaults.
+ * icall.c: added a couple of Reflection-related internalcalls.
+ * class.h, class.c: implemented mono_ldtoken () for RuntimeTypeHandles.
+ Added a byval_arg MonoType to MonoClass.
+
+
+ Fri Sep 28 19:43:12 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: marshal valuetypes that are enums.
+
+
+ Fri Sep 28 19:37:46 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: Implemented ldtoken, conv.ovf.i. Use MonoClass->byval_arg
+ (and remove related kludges). Don't choke on access to arrays of
+ references. Throw an exception when an internalcall or P/Invoke
+ function don't have an implementation. Throw and EngineException
+ for unimplemented opcodes.
+
+ svn path=/trunk/mono/; revision=1027
+
+commit 0122a3ea04b06d1d51f2756e48f6392ccac1096d
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 27 09:38:19 2001 +0000
+
+ Thu Sep 27 15:34:37 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: in memindex operand you can use X86_NOBASEREG
+ as basereg.
+
+ svn path=/trunk/mono/; revision=995
+
+commit a5844f903a68e9448d7031587ffbd02ed2c4f486
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Sep 26 10:33:18 2001 +0000
+
+ Wed Sep 26 16:29:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added memindex addressing mode encoding
+ (and mov to/from register opcodes).
+
+ svn path=/trunk/mono/; revision=984
+
+commit 1f45df6d593cd60780ea121d08ddd035a3418e4a
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 24 13:30:32 2001 +0000
+
+ Mon Sep 24 18:49:01 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: don't change a MONO_TYPE_STRING to a char*
+ when it's an argument to an internalcall.
+
+
+ Mon Sep 24 18:56:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * object.c, object.h: added mono_ldstr (), mono_string_is_interned () and
+ mono_string_intern () to implement the semantics of the ldstr opcode
+ and the interning of System.Strings.
+ * icall.c: provide hooks to make String::IsIntern and String::Intern
+ internalcalls.
+
+
+ Mon Sep 24 18:50:25 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: catch a few more error conditions with exceptions instead of
+ erroring out.
+ Don't use g_print() in stack traces because it doesn't work with
+ some float values.
+ When we call an instance method of a valuetype class, unbox the 'this'
+ argument if it is an object.
+ Use mono_ldstr () to implement the ldstr opcode: it takes care of
+ interning the string if necessary.
+ Implemented new opcodes: ckfinite, cgt.un, clt.un, ldvirtftn, ldarga.
+ Fixes to handle NaNs when comparing doubles.
+ Make sure the loaded assembly has an entry point defined.
+ Fixed portability bugs in neg and not opcodes.
+
+ svn path=/trunk/mono/; revision=943
+
+commit a995bd527db97e45d979a6b97e0a15a479d2e14b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sun Sep 23 07:49:26 2001 +0000
+
+ Sun Sep 23 13:44:57 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/tramp.c: handle MONO_TYPE_CLASS in trampolines.
+
+ svn path=/trunk/mono/; revision=927
+
+commit c9d21b14c718c8e7f3690f5d93ac349bbdd98d88
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Fri Sep 21 12:50:46 2001 +0000
+
+ implemented more opcodes
+
+ svn path=/trunk/mono/; revision=916
+
+commit a0930b7dcd7fe845e1c3c06f3fba6736f88d8bf9
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 20 15:31:50 2001 +0000
+
+ Thu Sep 20 16:32:42 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: implemented some more opcodes: calli, rem.un,
+ shr.un, conv.u, cpobj, stobj, conv.r.un, conv.ovf.i1.un,
+ conv.ovf.i2.un, conv.ovf.i4.un, conv.ovf.i8.un, conv.ovf.i.un,
+ conv.ovf.u1.un, conv.ovf.u2.un, conv.ovf.u4.un, conv.ovf.u8.un,
+ conv.ovf.u.un.
+ Fix some 64 bit issues in the array element access code and a small bug.
+ Throw an exception on index out of range instead of asserting.
+ Throw an exception on a NULL array instead of dying.
+ Stomped a memory corruption bug (.cctor methods were freed after
+ executing them, but they are stores in MonoClass now...).
+ Added a simple facility to invoke the debugger when a named
+ function is entered (use the cmdline option --debug method_name).
+ * interp.h: fix 64 bit issue.
+
+ svn path=/trunk/mono/; revision=904
+
+commit e177e60b93378860f0573f458d06cd641770a255
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Tue Sep 18 07:26:43 2001 +0000
+
+ Tue Sep 18 13:23:59 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: remove C++ comments.
+
+ svn path=/trunk/mono/; revision=865
+
+commit 4f874ee6ae2442c99421087b5ad11eae88283d55
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 09:10:44 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86.brg: emit real code for calls
+
+ * testjit.c (create_jit_trampoline): creates a function to trigger jit
+ compilation.
+ (mono_compile_method): reversed argument order
+
+ svn path=/trunk/mono/; revision=842
+
+commit 011e42b68518f5c1397ecdc0417c021b4c524560
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 17 07:18:11 2001 +0000
+
+ 2001-09-17 Dietmar Maurer <dietmar@ximian.com>
+
+ * x86/x86-codegen.h (x86_alu_reg_reg): replaced src/dest
+
+ svn path=/trunk/mono/; revision=841
+
+commit c61474703f058c226a94ba9cdfb1d19e3a45eecd
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Wed Sep 12 03:47:43 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=792
+
+commit db78bf2c09f07356fe4c8284d1a48fa9867bd2fc
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Sep 10 14:26:02 2001 +0000
+
+ Mon Sep 10 20:19:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * configure.in: check for sizeof(void*) and for the architecture.
+
+ Mon Sep 10 17:26:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am, x86/Makefile.am: conditional compile logic
+ to make porting to different targets easier.
+
+ Mon Sep 10 17:24:45 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: make it work for make distcheck.
+
+ Mon Sep 10 20:21:34 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * endian.h, assembly.c: fix some endianness issues.
+
+ Mon Sep 10 20:20:36 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c: endian fixes, comments.
+
+ svn path=/trunk/mono/; revision=783
+
+commit ce34fcec9c53a31ba2cd48f22c9a5099d02779e5
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:34:11 2001 +0000
+
+ *** empty log message ***
+
+ svn path=/trunk/mono/; revision=781
+
+commit 6c07667b555ca78bdad5d7b6e5aa87f8078c1989
+Author: Dietmar Maurer <dietmar@mono-cvs.ximian.com>
+Date: Mon Sep 10 09:14:46 2001 +0000
+
+ added the jit prototype, small fixes
+
+ svn path=/trunk/mono/; revision=780
+
+commit 680963c46ae8b96cca52387e0f5b1a2e39825b90
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Fri Sep 7 12:53:34 2001 +0000
+
+ Fri Sep 7 18:43:06 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fixes and x86_mov_membase_imm ().
+ * x86/tramp.c: implemented mono_create_method_pointer ():
+ creates a native pointer to a method implementation that can be
+ used as a normal C callback.
+
+
+ Fri Sep 7 18:45:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: make ves_exec_method () and stackval_from_data ()
+ non static. Implement a couple of runtime methods needed to
+ use delegates (ves_runtime_method ()).
+ Implemented ldftn opcode.
+
+ svn path=/trunk/mono/; revision=745
+
+commit 4c39a186f2fa0dc3cca3ae6f6dc6584c75341adf
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Thu Sep 6 09:46:03 2001 +0000
+
+ Thu Sep 6 15:38:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: added x86_rdtsc() and fixes.
+ * x86/tramp.c: create trampolines to call pinvoke methods.
+ * x86/Makefile.am: create a libmonoarch convenience library.
+
+
+ Thu Sep 6 15:41:24 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * Makefile.am: link to libmonoarch.
+ * interp.h, interp.c: use mono_create_trampoline ().
+ Pass the command line arguments to Main (String[]) methods.
+
+ svn path=/trunk/mono/; revision=728
+
+commit d3a5cf739f1182a42d20f1d5ace2a272307da87f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Mon Aug 27 03:43:09 2001 +0000
+
+ Mon Aug 27 09:29:00 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix x86_call_code (). x86_mov_regp_reg () added.
+
+ svn path=/trunk/mono/; revision=636
+
+commit 231c25bd596aa45a2962a9c820fc9417985a1f3f
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Sat Aug 18 06:55:29 2001 +0000
+
+ Sat Aug 18 12:40:32 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h: fix a couple of buglets and add x86_regp_emit().
+
+ Sat Aug 18 12:42:26 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * class.c, class.h: load also the methods when loading a class.
+
+ Sat Aug 18 12:43:38 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * interp.c, interp.h: added support code to create exceptions.
+ Changed interncal calling convnetion over to MonoInvocation, to support
+ exceptions, walking the stack back and forward and passing the 'this'
+ pointer separately (remove the cludges required before to pass this on the
+ stack). Use alloca heavily for both local vars and a copy of the incoming
+ arguments. Init local vars to zero.
+ Simplify stackval_from_data() and stackval_to_data() to only take a pointer
+ instead of pointer + offset.
+ Implement a few exceptions-related opcodes and the code to run finally, fault and
+ catch blocks as well as a stack trace if no handler is found.
+
+ Sat Aug 18 12:51:28 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * metadata.c, metadata.h: in the signature and method header store
+ only the space required for holding the loca vars and incoming arguments.
+
+ svn path=/trunk/mono/; revision=493
+
+commit 75cdbf5cd16480631ac8579c2c2f230761e4802b
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 17:21:29 2001 +0000
+
+ Fixed x86_mov_reg_imm().
+
+ svn path=/trunk/mono/; revision=441
+
+commit 5263eb4d219b8054b29a4d250cec40a7c8170a84
+Author: Miguel de Icaza <miguel@gnome.org>
+Date: Wed Aug 8 16:48:32 2001 +0000
+
+ Update copyright
+
+ svn path=/trunk/mono/; revision=440
+
+commit c9397770c008d427da0b7ad058782fc8564c10d3
+Author: Paolo Molaro <lupus@oddwiz.org>
+Date: Wed Aug 8 13:32:23 2001 +0000
+
+ Wed Aug 8 15:30:05 CEST 2001 Paolo Molaro <lupus@ximian.com>
+
+ * x86/x86-codegen.h, x86/test.c: added x86 code emitter with
+ test.
+
+ svn path=/trunk/mono/; revision=435
diff --git a/src/arch/LICENSE b/src/arch/LICENSE
new file mode 100644
index 0000000..cb4a84d
--- /dev/null
+++ b/src/arch/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2001, 2002, 2003 Ximian, Inc and the individuals listed
+on the ChangeLog entries.
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/arch/Makefile.am b/src/arch/Makefile.am
new file mode 100644
index 0000000..8741687
--- /dev/null
+++ b/src/arch/Makefile.am
@@ -0,0 +1,11 @@
+DIST_SUBDIRS = x86 ppc sparc arm arm64 s390x amd64 ia64 mips
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+if ARM
+# arm needs to build some stuff even in JIT mode
+SUBDIRS = $(arch_target)
+endif
+
+EXTRA_DIST = ChangeLog
+
diff --git a/src/arch/README b/src/arch/README
new file mode 100644
index 0000000..cfed57d
--- /dev/null
+++ b/src/arch/README
@@ -0,0 +1,7 @@
+mono_arch
+=========
+
+Part of Mono project, https://github.com/mono
+
+These are C macros that are useful when generating native code on various platforms.
+This code is MIT X11 licensed.
diff --git a/src/arch/arm/.gitattributes b/src/arch/arm/.gitattributes
new file mode 100644
index 0000000..4819db1
--- /dev/null
+++ b/src/arch/arm/.gitattributes
@@ -0,0 +1 @@
+/arm-wmmx.h -crlf
diff --git a/src/arch/arm/.gitignore b/src/arch/arm/.gitignore
new file mode 100644
index 0000000..978145d
--- /dev/null
+++ b/src/arch/arm/.gitignore
@@ -0,0 +1,15 @@
+/Makefile
+/Makefile.in
+/.deps
+/.libs
+/*.o
+/*.la
+/*.lo
+/*.lib
+/*.obj
+/*.exe
+/*.dll
+/arm_dpimacros.h
+/arm_fpamacros.h
+/arm_vfpmacros.h
+/fixeol.sh
diff --git a/src/arch/arm/Makefile.am b/src/arch/arm/Makefile.am
new file mode 100644
index 0000000..593574c
--- /dev/null
+++ b/src/arch/arm/Makefile.am
@@ -0,0 +1,27 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-arm.la
+
+BUILT_SOURCES = arm_dpimacros.h arm_vfpmacros.h
+
+
+libmonoarch_arm_la_SOURCES = $(BUILT_SOURCES) \
+ arm-codegen.c \
+ arm-codegen.h \
+ arm-dis.c \
+ arm-dis.h
+
+arm_dpimacros.h: dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th
+ (cd $(srcdir); bash ./dpiops.sh) > $@t
+ mv $@t $@
+
+arm_vfpmacros.h: vfpops.sh vfpm_macros.th vfp_macros.th
+ (cd $(srcdir); bash ./vfpops.sh) > $@t
+ mv $@t $@
+
+CLEANFILES = $(BUILT_SOURCES)
+
+EXTRA_DIST = dpiops.sh mov_macros.th dpi_macros.th cmp_macros.th \
+ vfpm_macros.th vfp_macros.th arm-vfp-codegen.h vfpops.sh
+
diff --git a/src/arch/arm/arm-codegen.c b/src/arch/arm/arm-codegen.c
new file mode 100644
index 0000000..9914ace
--- /dev/null
+++ b/src/arch/arm/arm-codegen.c
@@ -0,0 +1,193 @@
+/*
+ * arm-codegen.c
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#include "arm-codegen.h"
+
+
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+
+ /* save args */
+ ARM_PUSH(p, (1 << ARMREG_A1)
+ | (1 << ARMREG_A2)
+ | (1 << ARMREG_A3)
+ | (1 << ARMREG_A4));
+
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
+
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs) {
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_ADD_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_ADD_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ ARM_POP_NWB(p, (1 << ARMREG_SP) | (1 << ARMREG_PC) | (pop_regs & 0x3FF));
+
+ return p;
+}
+
+
+/* do not push A1-A4 */
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs) {
+ ARM_MOV_REG_REG(p, ARMREG_IP, ARMREG_SP);
+ /* push_regs upto R10 will be saved */
+ ARM_PUSH(p, (1U << ARMREG_IP) | (1U << ARMREG_LR) | (push_regs & 0x3FF));
+
+ if (local_size != 0) {
+ if ((local_size & (~0xFF)) == 0) {
+ ARM_SUB_REG_IMM8(p, ARMREG_SP, ARMREG_SP, local_size);
+ } else {
+ /* TODO: optimize */
+ p = arm_mov_reg_imm32(p, ARMREG_IP, local_size);
+ ARM_SUB_REG_REG(p, ARMREG_SP, ARMREG_SP, ARMREG_IP);
+ /* restore IP from stack */
+ ARM_ADD_REG_IMM8(p, ARMREG_IP, ARMREG_IP, sizeof(armword_t));
+ ARM_LDR_REG_REG(p, ARMREG_IP, ARMREG_SP, ARMREG_IP);
+ }
+ }
+
+ return p;
+}
+
+/* Bit scan forward. */
+int arm_bsf(armword_t val) {
+ int i;
+ armword_t mask;
+
+ if (val == 0) return 0;
+ for (i=1, mask=1; (i <= 8 * sizeof(armword_t)) && ((val & mask) == 0); ++i, mask<<=1);
+
+ return i;
+}
+
+
+int arm_is_power_of_2(armword_t val) {
+ return ((val & (val-1)) == 0);
+}
+
+
+/*
+ * returns:
+ * 1 - unable to represent
+ * positive even number - MOV-representable
+ * negative even number - MVN-representable
+ */
+int calc_arm_mov_const_shift(armword_t val) {
+ armword_t mask;
+ int res = 1, shift;
+
+ for (shift=0; shift < 32; shift+=2) {
+ mask = ARM_SCALE(0xFF, shift);
+ if ((val & (~mask)) == 0) {
+ res = shift;
+ break;
+ }
+ if (((~val) & (~mask)) == 0) {
+ res = -shift - 2;
+ break;
+ }
+ }
+
+ return res;
+}
+
+
+int is_arm_const(armword_t val) {
+ int res;
+ res = arm_is_power_of_2(val);
+ if (!res) {
+ res = calc_arm_mov_const_shift(val);
+ res = !(res < 0 || res == 1);
+ }
+ return res;
+}
+
+
+int arm_const_steps(armword_t val) {
+ int shift, steps = 0;
+
+ while (val != 0) {
+ shift = (arm_bsf(val) - 1) & (~1);
+ val &= ~(0xFF << shift);
+ ++steps;
+ }
+ return steps;
+}
+
+
+/*
+ * ARM cannot load arbitrary 32-bit constants directly into registers;
+ * widely used work-around for this is to store constants into a
+ * PC-addressable pool and use LDR instruction with PC-relative address
+ * to load constant into register. Easiest way to implement this is to
+ * embed constant inside a function with unconditional branch around it.
+ * The above method is not used at the moment.
+ * This routine always emits sequence of instructions to generate
+ * requested constant. In the worst case it takes 4 instructions to
+ * synthesize a constant - 1 MOV and 3 subsequent ORRs.
+ */
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond) {
+ int mov_op;
+ int step_op;
+ int snip;
+ int shift = calc_arm_mov_const_shift(imm32);
+
+ if ((shift & 0x80000001) != 1) {
+ if (shift >= 0) {
+ ARM_MOV_REG_IMM_COND(p, reg, imm32 >> ((32 - shift) & 31), shift, cond);
+ } else {
+ ARM_MVN_REG_IMM_COND(p, reg, (imm32 ^ (~0)) >> ((32 + 2 + shift) & 31), (-shift - 2), cond);
+ }
+ } else {
+ mov_op = ARMOP_MOV;
+ step_op = ARMOP_ORR;
+
+ if (arm_const_steps(imm32) > arm_const_steps(~imm32)) {
+ mov_op = ARMOP_MVN;
+ step_op = ARMOP_SUB;
+ imm32 = ~imm32;
+ }
+
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, 0, 0, mov_op, cond));
+
+ while ((imm32 ^= snip) != 0) {
+ shift = (arm_bsf(imm32) - 1) & (~1);
+ snip = imm32 & (0xFF << shift);
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((unsigned)snip >> shift, (32 - shift) >> 1, reg, reg, 0, step_op, cond));
+ }
+ }
+
+ return p;
+}
+
+
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32) {
+ return arm_mov_reg_imm32_cond(p, reg, imm32, ARMCOND_AL);
+}
+
+
+
diff --git a/src/arch/arm/arm-codegen.h b/src/arch/arm/arm-codegen.h
new file mode 100644
index 0000000..d4d7f7c
--- /dev/null
+++ b/src/arch/arm/arm-codegen.h
@@ -0,0 +1,1127 @@
+/*
+ * arm-codegen.h
+ * Copyright (c) 2002-2003 Sergey Chaban <serge@wildwestsoftware.com>
+ * Copyright 2005-2011 Novell Inc
+ * Copyright 2011 Xamarin Inc
+ */
+
+
+#ifndef ARM_H
+#define ARM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned int arminstr_t;
+typedef unsigned int armword_t;
+
+/* Helper functions */
+arminstr_t* arm_emit_std_prologue(arminstr_t* p, unsigned int local_size);
+arminstr_t* arm_emit_std_epilogue(arminstr_t* p, unsigned int local_size, int pop_regs);
+arminstr_t* arm_emit_lean_prologue(arminstr_t* p, unsigned int local_size, int push_regs);
+int arm_is_power_of_2(armword_t val);
+int calc_arm_mov_const_shift(armword_t val);
+int is_arm_const(armword_t val);
+int arm_bsf(armword_t val);
+arminstr_t* arm_mov_reg_imm32_cond(arminstr_t* p, int reg, armword_t imm32, int cond);
+arminstr_t* arm_mov_reg_imm32(arminstr_t* p, int reg, armword_t imm32);
+
+
+
+#if defined(_MSC_VER) || defined(__CC_NORCROFT)
+ void __inline _arm_emit(arminstr_t** p, arminstr_t i) {**p = i; (*p)++;}
+# define ARM_EMIT(p, i) _arm_emit((arminstr_t**)&p, (arminstr_t)(i))
+#else
+# define ARM_EMIT(p, i) do { arminstr_t *__ainstrp = (void*)(p); *__ainstrp = (arminstr_t)(i); (p) = (void*)(__ainstrp+1);} while (0)
+#endif
+
+#if defined(_MSC_VER) && !defined(ARM_NOIASM)
+# define ARM_IASM(_expr) __emit (_expr)
+#else
+# define ARM_IASM(_expr)
+#endif
+
+/* even_scale = rot << 1 */
+#define ARM_SCALE(imm8, even_scale) ( ((imm8) >> (even_scale)) | ((imm8) << (32 - even_scale)) )
+
+
+
+typedef enum {
+ ARMREG_R0 = 0,
+ ARMREG_R1,
+ ARMREG_R2,
+ ARMREG_R3,
+ ARMREG_R4,
+ ARMREG_R5,
+ ARMREG_R6,
+ ARMREG_R7,
+ ARMREG_R8,
+ ARMREG_R9,
+ ARMREG_R10,
+ ARMREG_R11,
+ ARMREG_R12,
+ ARMREG_R13,
+ ARMREG_R14,
+ ARMREG_R15,
+
+
+ /* aliases */
+ /* args */
+ ARMREG_A1 = ARMREG_R0,
+ ARMREG_A2 = ARMREG_R1,
+ ARMREG_A3 = ARMREG_R2,
+ ARMREG_A4 = ARMREG_R3,
+
+ /* local vars */
+ ARMREG_V1 = ARMREG_R4,
+ ARMREG_V2 = ARMREG_R5,
+ ARMREG_V3 = ARMREG_R6,
+ ARMREG_V4 = ARMREG_R7,
+ ARMREG_V5 = ARMREG_R8,
+ ARMREG_V6 = ARMREG_R9,
+ ARMREG_V7 = ARMREG_R10,
+
+ ARMREG_FP = ARMREG_R11,
+ ARMREG_IP = ARMREG_R12,
+ ARMREG_SP = ARMREG_R13,
+ ARMREG_LR = ARMREG_R14,
+ ARMREG_PC = ARMREG_R15,
+
+ /* co-processor */
+ ARMREG_CR0 = 0,
+ ARMREG_CR1,
+ ARMREG_CR2,
+ ARMREG_CR3,
+ ARMREG_CR4,
+ ARMREG_CR5,
+ ARMREG_CR6,
+ ARMREG_CR7,
+ ARMREG_CR8,
+ ARMREG_CR9,
+ ARMREG_CR10,
+ ARMREG_CR11,
+ ARMREG_CR12,
+ ARMREG_CR13,
+ ARMREG_CR14,
+ ARMREG_CR15,
+
+ /* XScale: acc0 on CP0 */
+ ARMREG_ACC0 = ARMREG_CR0,
+
+ ARMREG_MAX = ARMREG_R15
+} ARMReg;
+
+/* number of argument registers */
+#define ARM_NUM_ARG_REGS 4
+
+/* bitvector for all argument regs (A1-A4) */
+#define ARM_ALL_ARG_REGS \
+ (1 << ARMREG_A1) | (1 << ARMREG_A2) | (1 << ARMREG_A3) | (1 << ARMREG_A4)
+
+
+typedef enum {
+ ARMCOND_EQ = 0x0, /* Equal; Z = 1 */
+ ARMCOND_NE = 0x1, /* Not equal, or unordered; Z = 0 */
+ ARMCOND_CS = 0x2, /* Carry set; C = 1 */
+ ARMCOND_HS = ARMCOND_CS, /* Unsigned higher or same; */
+ ARMCOND_CC = 0x3, /* Carry clear; C = 0 */
+ ARMCOND_LO = ARMCOND_CC, /* Unsigned lower */
+ ARMCOND_MI = 0x4, /* Negative; N = 1 */
+ ARMCOND_PL = 0x5, /* Positive or zero; N = 0 */
+ ARMCOND_VS = 0x6, /* Overflow; V = 1 */
+ ARMCOND_VC = 0x7, /* No overflow; V = 0 */
+ ARMCOND_HI = 0x8, /* Unsigned higher; C = 1 && Z = 0 */
+ ARMCOND_LS = 0x9, /* Unsigned lower or same; C = 0 || Z = 1 */
+ ARMCOND_GE = 0xA, /* Signed greater than or equal; N = V */
+ ARMCOND_LT = 0xB, /* Signed less than; N != V */
+ ARMCOND_GT = 0xC, /* Signed greater than; Z = 0 && N = V */
+ ARMCOND_LE = 0xD, /* Signed less than or equal; Z = 1 && N != V */
+ ARMCOND_AL = 0xE, /* Always */
+ ARMCOND_NV = 0xF, /* Never */
+
+ ARMCOND_SHIFT = 28
+} ARMCond;
+
+#define ARMCOND_MASK (ARMCOND_NV << ARMCOND_SHIFT)
+
+#define ARM_DEF_COND(cond) (((cond) & 0xF) << ARMCOND_SHIFT)
+
+
+
+typedef enum {
+ ARMSHIFT_LSL = 0,
+ ARMSHIFT_LSR = 1,
+ ARMSHIFT_ASR = 2,
+ ARMSHIFT_ROR = 3,
+
+ ARMSHIFT_ASL = ARMSHIFT_LSL
+ /* rrx = (ror, 1) */
+} ARMShiftType;
+
+
+typedef struct {
+ armword_t PSR_c : 8;
+ armword_t PSR_x : 8;
+ armword_t PSR_s : 8;
+ armword_t PSR_f : 8;
+} ARMPSR;
+
+typedef enum {
+ ARMOP_AND = 0x0,
+ ARMOP_EOR = 0x1,
+ ARMOP_SUB = 0x2,
+ ARMOP_RSB = 0x3,
+ ARMOP_ADD = 0x4,
+ ARMOP_ADC = 0x5,
+ ARMOP_SBC = 0x6,
+ ARMOP_RSC = 0x7,
+ ARMOP_TST = 0x8,
+ ARMOP_TEQ = 0x9,
+ ARMOP_CMP = 0xa,
+ ARMOP_CMN = 0xb,
+ ARMOP_ORR = 0xc,
+ ARMOP_MOV = 0xd,
+ ARMOP_BIC = 0xe,
+ ARMOP_MVN = 0xf,
+
+
+ /* not really opcodes */
+
+ ARMOP_STR = 0x0,
+ ARMOP_LDR = 0x1,
+
+ /* ARM2+ */
+ ARMOP_MUL = 0x0, /* Rd := Rm*Rs */
+ ARMOP_MLA = 0x1, /* Rd := (Rm*Rs)+Rn */
+
+ /* ARM3M+ */
+ ARMOP_UMULL = 0x4,
+ ARMOP_UMLAL = 0x5,
+ ARMOP_SMULL = 0x6,
+ ARMOP_SMLAL = 0x7,
+
+ /* for data transfers with register offset */
+ ARM_UP = 1,
+ ARM_DOWN = 0
+} ARMOpcode;
+
+typedef enum {
+ THUMBOP_AND = 0,
+ THUMBOP_EOR = 1,
+ THUMBOP_LSL = 2,
+ THUMBOP_LSR = 3,
+ THUMBOP_ASR = 4,
+ THUMBOP_ADC = 5,
+ THUMBOP_SBC = 6,
+ THUMBOP_ROR = 7,
+ THUMBOP_TST = 8,
+ THUMBOP_NEG = 9,
+ THUMBOP_CMP = 10,
+ THUMBOP_CMN = 11,
+ THUMBOP_ORR = 12,
+ THUMBOP_MUL = 13,
+ THUMBOP_BIC = 14,
+ THUMBOP_MVN = 15,
+ THUMBOP_MOV = 16,
+ THUMBOP_CMPI = 17,
+ THUMBOP_ADD = 18,
+ THUMBOP_SUB = 19,
+ THUMBOP_CMPH = 19,
+ THUMBOP_MOVH = 20
+} ThumbOpcode;
+
+
+/* Generic form - all ARM instructions are conditional. */
+typedef struct {
+ arminstr_t icode : 28;
+ arminstr_t cond : 4;
+} ARMInstrGeneric;
+
+
+
+/* Branch or Branch with Link instructions. */
+typedef struct {
+ arminstr_t offset : 24;
+ arminstr_t link : 1;
+ arminstr_t tag : 3; /* 1 0 1 */
+ arminstr_t cond : 4;
+} ARMInstrBR;
+
+#define ARM_BR_ID 5
+#define ARM_BR_MASK 7 << 25
+#define ARM_BR_TAG ARM_BR_ID << 25
+
+#define ARM_DEF_BR(offs, l, cond) ((offs) | ((l) << 24) | (ARM_BR_TAG) | (cond << ARMCOND_SHIFT))
+
+/* branch */
+#define ARM_B_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 0, cond))
+#define ARM_B(p, offs) ARM_B_COND((p), ARMCOND_AL, (offs))
+/* branch with link */
+#define ARM_BL_COND(p, cond, offset) ARM_EMIT(p, ARM_DEF_BR(offset, 1, cond))
+#define ARM_BL(p, offs) ARM_BL_COND((p), ARMCOND_AL, (offs))
+
+#define ARM_DEF_BX(reg,sub,cond) (0x12fff << 8 | (reg) | ((sub) << 4) | ((cond) << ARMCOND_SHIFT))
+
+#define ARM_BX_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 1, cond))
+#define ARM_BX(p, reg) ARM_BX_COND((p), ARMCOND_AL, (reg))
+
+#define ARM_BLX_REG_COND(p, cond, reg) ARM_EMIT(p, ARM_DEF_BX(reg, 3, cond))
+#define ARM_BLX_REG(p, reg) ARM_BLX_REG_COND((p), ARMCOND_AL, (reg))
+
+/* Data Processing Instructions - there are 3 types. */
+
+typedef struct {
+ arminstr_t imm : 8;
+ arminstr_t rot : 4;
+} ARMDPI_op2_imm;
+
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag : 1; /* 0 - immediate shift, 1 - reg shift */
+ arminstr_t type : 2; /* shift type - logical, arithmetic, rotate */
+} ARMDPI_op2_reg_shift;
+
+
+/* op2 is reg shift by imm */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t shift : 5;
+ } imm;
+} ARMDPI_op2_reg_imm;
+
+/* op2 is reg shift by reg */
+typedef union {
+ ARMDPI_op2_reg_shift r2;
+ struct {
+ arminstr_t _dummy_r2 : 7;
+ arminstr_t pad : 1; /* always 0, to differentiate from HXFER etc. */
+ arminstr_t rs : 4;
+ } reg;
+} ARMDPI_op2_reg_reg;
+
+/* Data processing instrs */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+
+ ARMDPI_op2_reg_shift op2_reg;
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ ARMDPI_op2_reg_reg op2_reg_reg;
+
+ struct {
+ arminstr_t op2 : 12; /* raw operand 2 */
+ arminstr_t rd : 4; /* destination reg */
+ arminstr_t rn : 4; /* first operand reg */
+ arminstr_t s : 1; /* S-bit controls PSR update */
+ arminstr_t opcode : 4; /* arithmetic/logic operation */
+ arminstr_t type : 1; /* type of op2, 0 = register, 1 = immediate */
+ arminstr_t tag : 2; /* 0 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrDPI;
+
+#define ARM_DPI_ID 0
+#define ARM_DPI_MASK 3 << 26
+#define ARM_DPI_TAG ARM_DPI_ID << 26
+
+#define ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, cond) \
+ ((imm8) & 0xFF) | \
+ (((rot) & 0xF) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (1 << 25) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_DEF_DPI_IMM(imm8, rot, rd, rn, s, op) \
+ ARM_DEF_DPI_IMM_COND(imm8, rot, rd, rn, s, op, ARMCOND_AL)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 0, (op), cond))
+#define ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(p, op, rd, rn, imm8, rot, cond) \
+ ARM_IASM(ARM_DEF_DPI_IMM_COND((imm8), ((rot) >> 1), (rd), (rn), 1, (op), cond))
+
+
+
+#define ARM_DEF_DPI_REG_IMMSHIFT_COND(rm, shift_type, imm_shift, rd, rn, s, op, cond) \
+ (rm) | \
+ ((shift_type & 3) << 5) | \
+ (((imm_shift) & 0x1F) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(p, op, rd, rn, rm, shift_t, imm_shift, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), shift_t, imm_shift, (rd), (rn), 1, (op), cond))
+
+#define ARM_IASM_DPIOP_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REG_COND(p, op, rd, rn, rm, cond) \
+ ARM_IASM_EMIT(ARM_DEF_DPI_REG_IMMSHIFT_COND((rm), ARMSHIFT_LSL, 0, (rd), (rn), 1, (op), cond))
+
+
+/* Rd := Rn op (Rm shift_type Rs) */
+#define ARM_DEF_DPI_REG_REGSHIFT_COND(rm, shift_type, rs, rd, rn, s, op, cond) \
+ (rm) | \
+ (1 << 4) | \
+ ((shift_type & 3) << 5) | \
+ ((rs) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((s) << 20) | \
+ ((op) << 21) | \
+ (ARM_DPI_TAG) | \
+ ARM_DEF_COND(cond)
+
+/* codegen */
+#define ARM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+/* inline */
+#define ARM_IASM_DPIOP_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 0, (op), cond))
+
+#define ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(p, op, rd, rn, rm, shift_t, rs, cond) \
+ ARM_IASM(ARM_DEF_DPI_REG_REGSHIFT_COND((rm), shift_t, (rs), (rd), (rn), 1, (op), cond))
+
+
+
+/* Multiple register transfer. */
+typedef struct {
+ arminstr_t reg_list : 16; /* bitfield */
+ arminstr_t rn : 4; /* base reg */
+ arminstr_t ls : 1; /* load(1)/store(0) */
+ arminstr_t wb : 1; /* write-back "!" */
+ arminstr_t s : 1; /* restore PSR, force user bit */
+ arminstr_t u : 1; /* up/down */
+ arminstr_t p : 1; /* pre(1)/post(0) index */
+ arminstr_t tag : 3; /* 1 0 0 */
+ arminstr_t cond : 4;
+} ARMInstrMRT;
+
+#define ARM_MRT_ID 4
+#define ARM_MRT_MASK 7 << 25
+#define ARM_MRT_TAG ARM_MRT_ID << 25
+
+#define ARM_DEF_MRT(regs, rn, l, w, s, u, p, cond) \
+ (regs) | \
+ (rn << 16) | \
+ (l << 20) | \
+ (w << 21) | \
+ (s << 22) | \
+ (u << 23) | \
+ (p << 24) | \
+ (ARM_MRT_TAG) | \
+ ARM_DEF_COND(cond)
+
+
+#define ARM_LDM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_STM(p, base, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, base, 0, 0, 0, 1, 0, ARMCOND_AL))
+
+/* stmdb sp!, {regs} */
+#define ARM_PUSH(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+#define ARM_IASM_PUSH(regs) ARM_IASM(ARM_DEF_MRT(regs, ARMREG_SP, 0, 1, 0, 0, 1, ARMCOND_AL))
+
+/* ldmia sp!, {regs} */
+#define ARM_POP(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 1, 0, 1, 0, ARMCOND_AL))
+
+/* ldmia sp, {regs} ; (no write-back) */
+#define ARM_POP_NWB(p, regs) ARM_EMIT(p, ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+#define ARM_IASM_POP_NWB(regs) ARM_IASM_EMIT(ARM_DEF_MRT(regs, ARMREG_SP, 1, 0, 0, 1, 0, ARMCOND_AL))
+
+#define ARM_PUSH1(p, r1) ARM_PUSH(p, (1 << r1))
+#define ARM_PUSH2(p, r1, r2) ARM_PUSH(p, (1 << r1) | (1 << r2))
+#define ARM_PUSH3(p, r1, r2, r3) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_PUSH4(p, r1, r2, r3, r4) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_PUSH5(p, r1, r2, r3, r4, r5) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_PUSH6(p, r1, r2, r3, r4, r5, r6) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_PUSH7(p, r1, r2, r3, r4, r5, r6, r7) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_PUSH8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_PUSH(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+
+#define ARM_POP8(p, r1, r2, r3, r4, r5, r6, r7, r8) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7) | (1 << r8))
+#define ARM_POP7(p, r1, r2, r3, r4, r5, r6, r7) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6) | (1 << r7))
+#define ARM_POP6(p, r1, r2, r3, r4, r5, r6) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5) | (1 << r6))
+#define ARM_POP5(p, r1, r2, r3, r4, r5) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4) | (1 << r5))
+#define ARM_POP4(p, r1, r2, r3, r4) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3) | (1 << r4))
+#define ARM_POP3(p, r1, r2, r3) ARM_POP(p, (1 << r1) | (1 << r2) | (1 << r3))
+#define ARM_POP2(p, r1, r2) ARM_POP(p, (1 << r1) | (1 << r2))
+#define ARM_POP1(p, r1) ARM_POP(p, (1 << r1))
+
+
+/* Multiply instructions */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 4; /* 9 */
+ arminstr_t rs : 4;
+ arminstr_t rn : 4;
+ arminstr_t rd : 4;
+ arminstr_t s : 1;
+ arminstr_t opcode : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrMul;
+
+#define ARM_MUL_ID 0
+#define ARM_MUL_ID2 9
+#define ARM_MUL_MASK ((0xF << 24) | (0xF << 4))
+#define ARM_MUL_TAG ((ARM_MUL_ID << 24) | (ARM_MUL_ID2 << 4))
+
+#define ARM_DEF_MUL_COND(op, rd, rm, rs, rn, s, cond) \
+ (rm) | \
+ ((rs) << 8) | \
+ ((rn) << 12) | \
+ ((rd) << 16) | \
+ ((s & 1) << 17) | \
+ ((op & 7) << 18) | \
+ ARM_MUL_TAG | \
+ ARM_DEF_COND(cond)
+
+/* Rd := (Rm * Rs)[31:0]; 32 x 32 -> 32 */
+#define ARM_MUL_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_MUL(p, rd, rm, rs) \
+ ARM_MUL_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MULS_COND(p, rd, rm, rs, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_MULS(p, rd, rm, rs) \
+ ARM_MULS_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_MUL_REG_REG(p, rd, rm, rs) ARM_MUL(p, rd, rm, rs)
+#define ARM_MULS_REG_REG(p, rd, rm, rs) ARM_MULS(p, rd, rm, rs)
+
+/* inline */
+#define ARM_IASM_MUL_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 0, cond))
+#define ARM_IASM_MUL(rd, rm, rs) \
+ ARM_IASM_MUL_COND(rd, rm, rs, ARMCOND_AL)
+#define ARM_IASM_MULS_COND(rd, rm, rs, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MUL, rd, rm, rs, 0, 1, cond))
+#define ARM_IASM_MULS(rd, rm, rs) \
+ ARM_IASM_MULS_COND(rd, rm, rs, ARMCOND_AL)
+
+
+/* Rd := (Rm * Rs) + Rn; 32x32+32->32 */
+#define ARM_MLA_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_MLA(p, rd, rm, rs, rn) \
+ ARM_MLA_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_MLAS_COND(p, rd, rm, rs, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_MLAS(p, rd, rm, rs, rn) \
+ ARM_MLAS_COND(p, rd, rm, rs, rn, ARMCOND_AL)
+
+/* inline */
+#define ARM_IASM_MLA_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 0, cond))
+#define ARM_IASM_MLA(rd, rm, rs, rn) \
+ ARM_IASM_MLA_COND(rd, rm, rs, rn, ARMCOND_AL)
+#define ARM_IASM_MLAS_COND(rd, rm, rs, rn, cond) \
+ ARM_IASM_EMIT(ARM_DEF_MUL_COND(ARMOP_MLA, rd, rm, rs, rn, 1, cond))
+#define ARM_IASM_MLAS(rd, rm, rs, rn) \
+ ARM_IASM_MLAS_COND(rd, rm, rs, rn, ARMCOND_AL)
+
+
+
+/* Word/byte transfer */
+typedef union {
+ ARMDPI_op2_reg_imm op2_reg_imm;
+ struct {
+ arminstr_t op2_imm : 12;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t b : 1;
+ arminstr_t u : 1; /* down(0) / up(1) */
+ arminstr_t p : 1; /* post-index(0) / pre-index(1) */
+ arminstr_t type : 1; /* imm(0) / register(1) */
+ arminstr_t tag : 2; /* 0 1 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrWXfer;
+
+#define ARM_WXFER_ID 1
+#define ARM_WXFER_MASK (3 << 26)
+#define ARM_WXFER_TAG (ARM_WXFER_ID << 26)
+
+
+#define ARM_DEF_WXFER_IMM(imm12, rd, rn, ls, wb, b, p, cond) \
+ ((((int)imm12) < 0) ? -(int)(imm12) : (imm12)) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ (((int)(imm12) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_WXFER_MAX_OFFS 0xFFF
+
+/* this macro checks for imm12 bounds */
+#define ARM_EMIT_WXFER_IMM(ptr, imm12, rd, rn, ls, wb, b, p, cond) \
+ do { \
+ int _imm12 = (int)(imm12) < -ARM_WXFER_MAX_OFFS \
+ ? -ARM_WXFER_MAX_OFFS \
+ : (int)(imm12) > ARM_WXFER_MAX_OFFS \
+ ? ARM_WXFER_MAX_OFFS \
+ : (int)(imm12); \
+ ARM_EMIT((ptr), \
+ ARM_DEF_WXFER_IMM(_imm12, (rd), (rn), (ls), (wb), (b), (p), (cond))); \
+ } while (0)
+
+
+/* LDRx */
+/* immediate offset, post-index */
+#define ARM_LDR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 0, cond))
+
+#define ARM_LDR_IMM_POST(p, rd, rn, imm) ARM_LDR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 0, cond))
+
+#define ARM_LDRB_IMM_POST(p, rd, rn, imm) ARM_LDRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_LDR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+
+#define ARM_LDR_IMM(p, rd, rn, imm) ARM_LDR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_LDRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+
+#define ARM_LDRB_IMM(p, rd, rn, imm) ARM_LDRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* STRx */
+/* immediate offset, post-index */
+#define ARM_STR_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 0, cond))
+
+#define ARM_STR_IMM_POST(p, rd, rn, imm) ARM_STR_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_POST_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 0, cond))
+
+#define ARM_STRB_IMM_POST(p, rd, rn, imm) ARM_STRB_IMM_POST_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* immediate offset, pre-index */
+#define ARM_STR_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)
+/* ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 0, 1, cond)) */
+
+#define ARM_STR_IMM(p, rd, rn, imm) ARM_STR_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+#define ARM_STRB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_IMM(imm, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+
+#define ARM_STRB_IMM(p, rd, rn, imm) ARM_STRB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+/* write-back */
+#define ARM_STR_IMM_WB_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT_WXFER_IMM(p, imm, rd, rn, ARMOP_STR, 1, 0, 1, cond)
+#define ARM_STR_IMM_WB(p, rd, rn, imm) ARM_STR_IMM_WB_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, u, p, cond) \
+ (rm) | \
+ ((shift_type) << 5) | \
+ ((shift) << 7) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ ((b) << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ (1 << 25) | \
+ ARM_WXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_UP, p, cond)
+#define ARM_DEF_WXFER_REG_MINUS_REG_COND(rm, shift_type, shift, rd, rn, ls, wb, b, p, cond) \
+ ARM_DEF_WXFER_REG_REG_UPDOWN_COND(rm, shift_type, shift, rd, rn, ls, wb, b, ARM_DOWN, p, cond)
+
+
+#define ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 0, 1, cond))
+#define ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDR_REG_REG(p, rd, rn, rm) \
+ ARM_LDR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_LDR, 0, 1, 1, cond))
+#define ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_LDRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_LDRB_REG_REG(p, rd, rn, rm) \
+ ARM_LDRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+#define ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 0, 1, cond))
+#define ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STR_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STR_REG_REG(p, rd, rn, rm) \
+ ARM_STR_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+/* zero-extend */
+#define ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, cond) \
+ ARM_EMIT(p, ARM_DEF_WXFER_REG_REG_COND(rm, shift_type, shift, rd, rn, ARMOP_STR, 0, 1, 1, cond))
+#define ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, shift_type, shift) \
+ ARM_STRB_REG_REG_SHIFT_COND(p, rd, rn, rm, shift_type, shift, ARMCOND_AL)
+#define ARM_STRB_REG_REG(p, rd, rn, rm) \
+ ARM_STRB_REG_REG_SHIFT(p, rd, rn, rm, ARMSHIFT_LSL, 0)
+
+
+/* ARMv4+ */
+/* Half-word or byte (signed) transfer. */
+typedef struct {
+ arminstr_t rm : 4; /* imm_lo */
+ arminstr_t tag3 : 1; /* 1 */
+ arminstr_t h : 1; /* half-word or byte */
+ arminstr_t s : 1; /* sign-extend or zero-extend */
+ arminstr_t tag2 : 1; /* 1 */
+ arminstr_t imm_hi : 4;
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t type : 1; /* imm(1) / reg(0) */
+ arminstr_t u : 1; /* +- */
+ arminstr_t p : 1; /* pre/post-index */
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrHXfer;
+
+#define ARM_HXFER_ID 0
+#define ARM_HXFER_ID2 1
+#define ARM_HXFER_ID3 1
+#define ARM_HXFER_MASK ((0x7 << 25) | (0x9 << 4))
+#define ARM_HXFER_TAG ((ARM_HXFER_ID << 25) | (ARM_HXFER_ID2 << 7) | (ARM_HXFER_ID3 << 4))
+
+#define ARM_DEF_HXFER_IMM_COND(imm, h, s, rd, rn, ls, wb, p, cond) \
+ ((imm) < 0?(-(imm)) & 0xF:(imm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((imm) < 0?((-(imm)) << 4) & 0xF00:((imm) << 4) & 0xF00) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (1 << 22) | \
+ (((int)(imm) >= 0) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_LDRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_IMM(p, rd, rn, imm) \
+ ARM_LDRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_IMM(p, rd, rn, imm) \
+ ARM_LDRSH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+#define ARM_LDRSB_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_IMM(p, rd, rn, imm) \
+ ARM_LDRSB_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_STRH_IMM_COND(p, rd, rn, imm, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_IMM_COND(imm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_IMM(p, rd, rn, imm) \
+ ARM_STRH_IMM_COND(p, rd, rn, imm, ARMCOND_AL)
+
+
+#define ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, u, p, cond) \
+ ((rm) & 0xF) | \
+ ((h) << 5) | \
+ ((s) << 6) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((ls) << 20) | \
+ ((wb) << 21) | \
+ (0 << 22) | \
+ ((u) << 23) | \
+ ((p) << 24) | \
+ ARM_HXFER_TAG | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_HXFER_REG_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_UP, p, cond)
+#define ARM_DEF_HXFER_REG_MINUS_REG_COND(rm, h, s, rd, rn, ls, wb, p, cond) \
+ ARM_DEF_HXFER_REG_REG_UPDOWN_COND(rm, h, s, rd, rn, ls, wb, ARM_DOWN, p, cond)
+
+#define ARM_LDRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSH_REG_REG(p, rd, rm, rn) \
+ ARM_LDRSH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+#define ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 0, 1, rd, rn, ARMOP_LDR, 0, 1, cond))
+#define ARM_LDRSB_REG_REG(p, rd, rm, rn) ARM_LDRSB_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+#define ARM_STRH_REG_REG_COND(p, rd, rm, rn, cond) \
+ ARM_EMIT(p, ARM_DEF_HXFER_REG_REG_COND(rm, 1, 0, rd, rn, ARMOP_STR, 0, 1, cond))
+#define ARM_STRH_REG_REG(p, rd, rm, rn) \
+ ARM_STRH_REG_REG_COND(p, rd, rm, rn, ARMCOND_AL)
+
+
+
+/* Swap */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag3 : 8; /* 0x9 */
+ arminstr_t rd : 4;
+ arminstr_t rn : 4;
+ arminstr_t tag2 : 2;
+ arminstr_t b : 1;
+ arminstr_t tag : 5; /* 0x2 */
+ arminstr_t cond : 4;
+} ARMInstrSwap;
+
+#define ARM_SWP_ID 2
+#define ARM_SWP_ID2 9
+#define ARM_SWP_MASK ((0x1F << 23) | (3 << 20) | (0xFF << 4))
+#define ARM_SWP_TAG ((ARM_SWP_ID << 23) | (ARM_SWP_ID2 << 4))
+
+
+
+/* Software interrupt */
+typedef struct {
+ arminstr_t num : 24;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrSWI;
+
+#define ARM_SWI_ID 0xF
+#define ARM_SWI_MASK (0xF << 24)
+#define ARM_SWI_TAG (ARM_SWI_ID << 24)
+
+
+
+/* Co-processor Data Processing */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1; /* 0 */
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4; /* CP number */
+ arminstr_t crd : 4;
+ arminstr_t crn : 4;
+ arminstr_t op : 4;
+ arminstr_t tag : 4; /* 0xE */
+ arminstr_t cond : 4;
+} ARMInstrCDP;
+
+#define ARM_CDP_ID 0xE
+#define ARM_CDP_ID2 0
+#define ARM_CDP_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CDP_TAG ((ARM_CDP_ID << 24) | (ARM_CDP_ID2 << 4))
+
+
+/* Co-processor Data Transfer (ldc/stc) */
+typedef struct {
+ arminstr_t offs : 8;
+ arminstr_t cpn : 4;
+ arminstr_t crd : 4;
+ arminstr_t rn : 4;
+ arminstr_t ls : 1;
+ arminstr_t wb : 1;
+ arminstr_t n : 1;
+ arminstr_t u : 1;
+ arminstr_t p : 1;
+ arminstr_t tag : 3;
+ arminstr_t cond : 4;
+} ARMInstrCDT;
+
+#define ARM_CDT_ID 6
+#define ARM_CDT_MASK (7 << 25)
+#define ARM_CDT_TAG (ARM_CDT_ID << 25)
+
+
+/* Co-processor Register Transfer (mcr/mrc) */
+typedef struct {
+ arminstr_t crm : 4;
+ arminstr_t tag2 : 1;
+ arminstr_t op2 : 3;
+ arminstr_t cpn : 4;
+ arminstr_t rd : 4;
+ arminstr_t crn : 4;
+ arminstr_t ls : 1;
+ arminstr_t op1 : 3;
+ arminstr_t tag : 4;
+ arminstr_t cond : 4;
+} ARMInstrCRT;
+
+#define ARM_CRT_ID 0xE
+#define ARM_CRT_ID2 0x1
+#define ARM_CRT_MASK ((0xF << 24) | (1 << 4))
+#define ARM_CRT_TAG ((ARM_CRT_ID << 24) | (ARM_CRT_ID2 << 4))
+
+/* Move register to PSR. */
+typedef union {
+ ARMDPI_op2_imm op2_imm;
+ struct {
+ arminstr_t rm : 4;
+ arminstr_t pad : 8; /* 0 */
+ arminstr_t tag4 : 4; /* 0xF */
+ arminstr_t fld : 4;
+ arminstr_t tag3 : 2; /* 0x2 */
+ arminstr_t sel : 1;
+ arminstr_t tag2 : 2; /* 0x2 */
+ arminstr_t type : 1;
+ arminstr_t tag : 2; /* 0 */
+ arminstr_t cond : 4;
+ } all;
+} ARMInstrMSR;
+
+#define ARM_MSR_ID 0
+#define ARM_MSR_ID2 2
+#define ARM_MSR_ID3 2
+#define ARM_MSR_ID4 0xF
+#define ARM_MSR_MASK ((3 << 26) | \
+ (3 << 23) | \
+ (3 << 20) | \
+ (0xF << 12))
+#define ARM_MSR_TAG ((ARM_MSR_ID << 26) | \
+ (ARM_MSR_ID2 << 23) | \
+ (ARM_MSR_ID3 << 20) | \
+ (ARM_MSR_ID4 << 12))
+
+
+/* Move PSR to register. */
+typedef struct {
+ arminstr_t tag3 : 12;
+ arminstr_t rd : 4;
+ arminstr_t tag2 : 6;
+ arminstr_t sel : 1; /* CPSR | SPSR */
+ arminstr_t tag : 5;
+ arminstr_t cond : 4;
+} ARMInstrMRS;
+
+#define ARM_MRS_ID 2
+#define ARM_MRS_ID2 0xF
+#define ARM_MRS_ID3 0
+#define ARM_MRS_MASK ((0x1F << 23) | (0x3F << 16) | 0xFFF)
+#define ARM_MRS_TAG ((ARM_MRS_ID << 23) | (ARM_MRS_ID2 << 16) | ARM_MRS_ID3)
+
+
+
+#include "mono/arch/arm/arm_dpimacros.h"
+
+#define ARM_NOP(p) ARM_MOV_REG_REG(p, ARMREG_R0, ARMREG_R0)
+
+
+#define ARM_SHL_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHL_IMM(p, rd, rm, imm) \
+ ARM_SHL_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHLS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, imm, cond)
+#define ARM_SHLS_IMM(p, rd, rm, imm) \
+ ARM_SHLS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHR_IMM(p, rd, rm, imm) \
+ ARM_SHR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SHRS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, imm, cond)
+#define ARM_SHRS_IMM(p, rd, rm, imm) \
+ ARM_SHRS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SAR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SAR_IMM(p, rd, rm, imm) \
+ ARM_SAR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_SARS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, imm, cond)
+#define ARM_SARS_IMM(p, rd, rm, imm) \
+ ARM_SARS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_ROR_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOV_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_ROR_IMM(p, rd, rm, imm) \
+ ARM_ROR_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+#define ARM_RORS_IMM_COND(p, rd, rm, imm, cond) \
+ ARM_MOVS_REG_IMMSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, imm, cond)
+#define ARM_RORS_IMM(p, rd, rm, imm) \
+ ARM_RORS_IMM_COND(p, rd, rm, imm, ARMCOND_AL)
+
+#define ARM_SHL_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHL_REG(p, rd, rm, rs) \
+ ARM_SHL_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSL, rs, cond)
+#define ARM_SHLS_REG(p, rd, rm, rs) \
+ ARM_SHLS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHLS_REG_REG(p, rd, rm, rs) ARM_SHLS_REG(p, rd, rm, rs)
+
+#define ARM_SHR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHR_REG(p, rd, rm, rs) \
+ ARM_SHR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_LSR, rs, cond)
+#define ARM_SHRS_REG(p, rd, rm, rs) \
+ ARM_SHRS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SHRS_REG_REG(p, rd, rm, rs) ARM_SHRS_REG(p, rd, rm, rs)
+
+#define ARM_SAR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SAR_REG(p, rd, rm, rs) \
+ ARM_SAR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ASR, rs, cond)
+#define ARM_SARS_REG(p, rd, rm, rs) \
+ ARM_SARS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_SARS_REG_REG(p, rd, rm, rs) ARM_SARS_REG(p, rd, rm, rs)
+
+#define ARM_ROR_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOV_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_ROR_REG(p, rd, rm, rs) \
+ ARM_ROR_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_COND(p, rd, rm, rs, cond) \
+ ARM_MOVS_REG_REGSHIFT_COND(p, rd, rm, ARMSHIFT_ROR, rs, cond)
+#define ARM_RORS_REG(p, rd, rm, rs) \
+ ARM_RORS_REG_COND(p, rd, rm, rs, ARMCOND_AL)
+#define ARM_RORS_REG_REG(p, rd, rm, rs) ARM_RORS_REG(p, rd, rm, rs)
+
+#ifdef __native_client_codegen__
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE7FEDEF0)
+#else
+#define ARM_DBRK(p) ARM_EMIT(p, 0xE6000010)
+#endif
+#define ARM_IASM_DBRK() ARM_IASM_EMIT(0xE6000010)
+
+#define ARM_INC(p, reg) ARM_ADD_REG_IMM8(p, reg, reg, 1)
+#define ARM_DEC(p, reg) ARM_SUB_REG_IMM8(p, reg, reg, 1)
+
+#define ARM_MLS(p, rd, rn, rm, ra) ARM_EMIT((p), (ARMCOND_AL << 28) | (0x6 << 20) | ((rd) << 16) | ((ra) << 12) | ((rm) << 8) | (0x9 << 4) | ((rn) << 0))
+
+/* ARM V5 */
+
+/* Count leading zeros, CLZ{cond} Rd, Rm */
+typedef struct {
+ arminstr_t rm : 4;
+ arminstr_t tag2 : 8;
+ arminstr_t rd : 4;
+ arminstr_t tag : 12;
+ arminstr_t cond : 4;
+} ARMInstrCLZ;
+
+#define ARM_CLZ_ID 0x16F
+#define ARM_CLZ_ID2 0xF1
+#define ARM_CLZ_MASK ((0xFFF << 16) | (0xFF << 4))
+#define ARM_CLZ_TAG ((ARM_CLZ_ID << 16) | (ARM_CLZ_ID2 << 4))
+
+
+
+
+typedef union {
+ ARMInstrBR br;
+ ARMInstrDPI dpi;
+ ARMInstrMRT mrt;
+ ARMInstrMul mul;
+ ARMInstrWXfer wxfer;
+ ARMInstrHXfer hxfer;
+ ARMInstrSwap swp;
+ ARMInstrCDP cdp;
+ ARMInstrCDT cdt;
+ ARMInstrCRT crt;
+ ARMInstrSWI swi;
+ ARMInstrMSR msr;
+ ARMInstrMRS mrs;
+ ARMInstrCLZ clz;
+
+ ARMInstrGeneric generic;
+ arminstr_t raw;
+} ARMInstr;
+
+/* ARMv6t2 */
+
+#define ARM_MOVW_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (0 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVW_REG_IMM(p, rd, imm16) ARM_MOVW_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+#define ARM_MOVT_REG_IMM_COND(p, rd, imm16, cond) ARM_EMIT(p, (((cond) << 28) | (3 << 24) | (4 << 20) | ((((guint32)(imm16)) >> 12) << 16) | ((rd) << 12) | (((guint32)(imm16)) & 0xfff)))
+#define ARM_MOVT_REG_IMM(p, rd, imm16) ARM_MOVT_REG_IMM_COND ((p), (rd), (imm16), ARMCOND_AL)
+
+/* MCR */
+#define ARM_DEF_MCR_COND(coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_DEF_COND ((cond)) | ((0xe << 24) | (((opc1) & 0x7) << 21) | (0 << 20) | (((crn) & 0xf) << 16) | (((rt) & 0xf) << 12) | (((coproc) & 0xf) << 8) | (((opc2) & 0x7) << 5) | (1 << 4) | (((crm) & 0xf) << 0))
+
+#define ARM_MCR_COND(p, coproc, opc1, rt, crn, crm, opc2, cond) \
+ ARM_EMIT(p, ARM_DEF_MCR_COND ((coproc), (opc1), (rt), (crn), (crm), (opc2), (cond)))
+
+#define ARM_MCR(p, coproc, opc1, rt, crn, crm, opc2) \
+ ARM_MCR_COND ((p), (coproc), (opc1), (rt), (crn), (crm), (opc2), ARMCOND_AL)
+
+/* ARMv7VE */
+#define ARM_SDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x1 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_SDIV(p, rd, rn, rm) ARM_SDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+#define ARM_UDIV_COND(p, rd, rn, rm, cond) ARM_EMIT (p, (((cond) << 28) | (0xe << 23) | (0x3 << 20) | ((rd) << 16) | (0xf << 12) | ((rm) << 8) | (0x0 << 5) | (0x1 << 4) | ((rn) << 0)))
+#define ARM_UDIV(p, rd, rn, rm) ARM_UDIV_COND ((p), (rd), (rn), (rm), ARMCOND_AL)
+
+/* ARMv7 */
+
+typedef enum {
+ ARM_DMB_SY = 0xf,
+} ArmDmbFlags;
+
+#define ARM_DMB(p, option) ARM_EMIT ((p), ((0xf << 28) | (0x57 << 20) | (0xf << 16) | (0xf << 12) | (0x0 << 8) | (0x5 << 4) | ((option) << 0)))
+
+#define ARM_LDREX_REG(p, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x1 << 20) | ((rn) << 16) | ((rt) << 12)) | (0xf << 8) | (0x9 << 4) | 0xf << 0)
+
+#define ARM_STREX_REG(p, rd, rt, rn) ARM_EMIT ((p), ((ARMCOND_AL << 28) | (0xc << 21) | (0x0 << 20) | ((rn) << 16) | ((rd) << 12)) | (0xf << 8) | (0x9 << 4) | ((rt) << 0))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_H */
+
diff --git a/src/arch/arm/arm-dis.c b/src/arch/arm/arm-dis.c
new file mode 100644
index 0000000..5074f26
--- /dev/null
+++ b/src/arch/arm/arm-dis.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+
+#include <stdarg.h>
+
+#include "arm-dis.h"
+#include "arm-codegen.h"
+
+
+static ARMDis* gdisasm = NULL;
+
+static int use_reg_alias = 1;
+
+const static char* cond[] = {
+ "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt", "gt", "le", "", "nv"
+};
+
+const static char* ops[] = {
+ "and", "eor", "sub", "rsb", "add", "adc", "sbc", "rsc",
+ "tst", "teq", "cmp", "cmn", "orr", "mov", "bic", "mvn"
+};
+
+const static char* shift_types[] = {"lsl", "lsr", "asr", "ror"};
+
+const static char* mul_ops[] = {
+ "mul", "mla", "?", "?", "umull", "umlal", "smull", "smlal"
+};
+
+const static char* reg_alias[] = {
+ "a1", "a2", "a3", "a4",
+ "r4", "r5", "r6", "r7", "r8", "r9", "r10",
+ "fp", "ip", "sp", "lr", "pc"
+};
+
+const static char* msr_fld[] = {"f", "c", "x", "?", "s"};
+
+
+/* private functions prototypes (to keep compiler happy) */
+void chk_out(ARMDis* dis);
+void dump_reg(ARMDis* dis, int reg);
+void dump_creg(ARMDis* dis, int creg);
+void dump_reglist(ARMDis* dis, int reg_list);
+void init_gdisasm(void);
+
+void dump_br(ARMDis* dis, ARMInstr i);
+void dump_cdp(ARMDis* dis, ARMInstr i);
+void dump_cdt(ARMDis* dis, ARMInstr i);
+void dump_crt(ARMDis* dis, ARMInstr i);
+void dump_dpi(ARMDis* dis, ARMInstr i);
+void dump_hxfer(ARMDis* dis, ARMInstr i);
+void dump_mrs(ARMDis* dis, ARMInstr i);
+void dump_mrt(ARMDis* dis, ARMInstr i);
+void dump_msr(ARMDis* dis, ARMInstr i);
+void dump_mul(ARMDis* dis, ARMInstr i);
+void dump_swi(ARMDis* dis, ARMInstr i);
+void dump_swp(ARMDis* dis, ARMInstr i);
+void dump_wxfer(ARMDis* dis, ARMInstr i);
+void dump_clz(ARMDis* dis, ARMInstr i);
+
+
+/*
+void out(ARMDis* dis, const char* format, ...) {
+ va_list arglist;
+ va_start(arglist, format);
+ fprintf(dis->dis_out, format, arglist);
+ va_end(arglist);
+}
+*/
+
+
+void chk_out(ARMDis* dis) {
+ if (dis != NULL && dis->dis_out == NULL) dis->dis_out = stdout;
+}
+
+
+void armdis_set_output(ARMDis* dis, FILE* f) {
+ if (dis != NULL) {
+ dis->dis_out = f;
+ chk_out(dis);
+ }
+}
+
+FILE* armdis_get_output(ARMDis* dis) {
+ return (dis != NULL ? dis->dis_out : NULL);
+}
+
+
+
+
+void dump_reg(ARMDis* dis, int reg) {
+ reg &= 0xF;
+ if (!use_reg_alias || (reg > 3 && reg < 11)) {
+ fprintf(dis->dis_out, "r%d", reg);
+ } else {
+ fprintf(dis->dis_out, "%s", reg_alias[reg]);
+ }
+}
+
+void dump_creg(ARMDis* dis, int creg) {
+ if (dis != NULL) {
+ creg &= 0xF;
+ fprintf(dis->dis_out, "c%d", creg);
+ }
+}
+
+void dump_reglist(ARMDis* dis, int reg_list) {
+ int i = 0, j, n = 0;
+ int m1 = 1, m2, rn;
+ while (i < 16) {
+ if ((reg_list & m1) != 0) {
+ if (n != 0) fprintf(dis->dis_out, ", ");
+ n++;
+ dump_reg(dis, i);
+ for (j = i+1, rn = 0, m2 = m1<<1; j < 16; ++j, m2<<=1) {
+ if ((reg_list & m2) != 0) ++rn;
+ else break;
+ }
+ i+=rn;
+ if (rn > 1) {
+ fprintf(dis->dis_out, "-");
+ dump_reg(dis, i);
+ } else if (rn == 1) {
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i);
+ }
+ m1<<=(rn+1);
+ i++;
+ } else {
+ ++i;
+ m1<<=1;
+ }
+ }
+}
+
+
+void dump_br(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "b%s%s\t%x\t; %p -> %#x",
+ (i.br.link == 1) ? "l" : "",
+ cond[i.br.cond], i.br.offset, dis->pi, (int)dis->pi + 4*2 + ((int)(i.br.offset << 8) >> 6));
+}
+
+
+void dump_dpi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s", ops[i.dpi.all.opcode], cond[i.dpi.all.cond]);
+
+ if ((i.dpi.all.opcode < ARMOP_TST || i.dpi.all.opcode > ARMOP_CMN) && (i.dpi.all.s != 0)) {
+ fprintf(dis->dis_out, "s");
+ }
+
+ fprintf(dis->dis_out, "\t");
+
+ if ((i.dpi.all.opcode < ARMOP_TST) || (i.dpi.all.opcode > ARMOP_CMN)) {
+ /* for comparison operations Rd is ignored */
+ dump_reg(dis, i.dpi.all.rd);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if ((i.dpi.all.opcode != ARMOP_MOV) && (i.dpi.all.opcode != ARMOP_MVN)) {
+ /* for MOV/MVN Rn is ignored */
+ dump_reg(dis, i.dpi.all.rn);
+ fprintf(dis->dis_out, ", ");
+ }
+
+ if (i.dpi.all.type == 1) {
+ /* immediate */
+ if (i.dpi.op2_imm.rot != 0) {
+ fprintf(dis->dis_out, "#%d, %d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.rot << 1,
+ ARM_SCALE(i.dpi.op2_imm.imm, (i.dpi.op2_imm.rot << 1)) );
+ } else {
+ fprintf(dis->dis_out, "#%d\t; 0x%x", i.dpi.op2_imm.imm, i.dpi.op2_imm.imm);
+ }
+ } else {
+ /* reg-reg */
+ if (i.dpi.op2_reg.tag == 0) {
+ /* op2 is reg shift by imm */
+ dump_reg(dis, i.dpi.op2_reg_imm.r2.rm);
+ if (i.dpi.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.dpi.op2_reg_imm.r2.type], i.dpi.op2_reg_imm.imm.shift);
+ }
+ } else {
+ /* op2 is reg shift by reg */
+ dump_reg(dis, i.dpi.op2_reg_reg.r2.rm);
+ fprintf(dis->dis_out, " %s ", shift_types[i.dpi.op2_reg_reg.r2.type]);
+ dump_reg(dis, i.dpi.op2_reg_reg.reg.rs);
+ }
+
+ }
+}
+
+void dump_wxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.wxfer.all.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.wxfer.all.b == 0) ? "" : "b",
+ (i.wxfer.all.ls != 0 && i.wxfer.all.wb != 0) ? "t" : "");
+ dump_reg(dis, i.wxfer.all.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.wxfer.all.rn);
+ fprintf(dis->dis_out, "%s, ", (i.wxfer.all.p == 0) ? "]" : "");
+
+ if (i.wxfer.all.type == 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.wxfer.all.u == 0) ? "-" : "", i.wxfer.all.op2_imm);
+ } else {
+ dump_reg(dis, i.wxfer.op2_reg_imm.r2.rm);
+ if (i.wxfer.op2_reg_imm.imm.shift != 0) {
+ fprintf(dis->dis_out, " %s #%d", shift_types[i.wxfer.op2_reg_imm.r2.type], i.wxfer.op2_reg_imm.imm.shift);
+ }
+ }
+
+ if (i.wxfer.all.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.wxfer.all.wb != 0) ? "!" : "");
+ }
+}
+
+void dump_hxfer(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t",
+ (i.hxfer.ls == 0) ? "str" : "ldr",
+ cond[i.generic.cond],
+ (i.hxfer.s != 0) ? "s" : "",
+ (i.hxfer.h != 0) ? "h" : "b");
+ dump_reg(dis, i.hxfer.rd);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.hxfer.rn);
+ fprintf(dis->dis_out, "%s, ", (i.hxfer.p == 0) ? "]" : "");
+
+ if (i.hxfer.type != 0) { /* imm */
+ fprintf(dis->dis_out, "#%s%d", (i.hxfer.u == 0) ? "-" : "", (i.hxfer.imm_hi << 4) | i.hxfer.rm);
+ } else {
+ dump_reg(dis, i.hxfer.rm);
+ }
+
+ if (i.hxfer.p != 0) {
+ /* close pre-index instr, also check for write-back */
+ fprintf(dis->dis_out, "]%s", (i.hxfer.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_mrt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s%s\t", (i.mrt.ls == 0) ? "stm" : "ldm", cond[i.mrt.cond],
+ (i.mrt.u == 0) ? "d" : "i", (i.mrt.p == 0) ? "a" : "b");
+ dump_reg(dis, i.mrt.rn);
+ fprintf(dis->dis_out, "%s, {", (i.mrt.wb != 0) ? "!" : "");
+ dump_reglist(dis, i.mrt.reg_list);
+ fprintf(dis->dis_out, "}");
+}
+
+
+void dump_swp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swp%s%s ", cond[i.swp.cond], (i.swp.b != 0) ? "b" : "");
+ dump_reg(dis, i.swp.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.swp.rm);
+ fprintf(dis->dis_out, ", [");
+ dump_reg(dis, i.swp.rn);
+ fprintf(dis->dis_out, "]");
+}
+
+
+void dump_mul(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\t", mul_ops[i.mul.opcode], cond[i.mul.cond], (i.mul.s != 0) ? "s" : "");
+ switch (i.mul.opcode) {
+ case ARMOP_MUL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ case ARMOP_MLA:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ break;
+ case ARMOP_UMULL:
+ case ARMOP_UMLAL:
+ case ARMOP_SMULL:
+ case ARMOP_SMLAL:
+ dump_reg(dis, i.mul.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rn);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rm);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.mul.rs);
+ break;
+ default:
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", i.raw);
+ break;
+ }
+}
+
+
+void dump_cdp(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "cdp%s\tp%d, %d, ", cond[i.generic.cond], i.cdp.cpn, i.cdp.op);
+ dump_creg(dis, i.cdp.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.cdp.crm);
+
+ if (i.cdp.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.cdp.op2);
+ }
+}
+
+
+void dump_cdt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s%s\tp%d, ", (i.cdt.ls == 0) ? "stc" : "ldc",
+ cond[i.generic.cond], (i.cdt.n != 0) ? "l" : "", i.cdt.cpn);
+ dump_creg(dis, i.cdt.crd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.cdt.rn);
+
+ if (i.cdt.p == 0) {
+ fprintf(dis->dis_out, "]");
+ }
+
+ if (i.cdt.offs != 0) {
+ fprintf(dis->dis_out, ", #%d", i.cdt.offs);
+ }
+
+ if (i.cdt.p != 0) {
+ fprintf(dis->dis_out, "]%s", (i.cdt.wb != 0) ? "!" : "");
+ }
+}
+
+
+void dump_crt(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "%s%s\tp%d, %d, ", (i.crt.ls == 0) ? "mrc" : "mcr",
+ cond[i.generic.cond], i.crt.cpn, i.crt.op1);
+ dump_reg(dis, i.crt.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crn);
+ fprintf(dis->dis_out, ", ");
+ dump_creg(dis, i.crt.crm);
+
+ if (i.crt.op2 != 0) {
+ fprintf(dis->dis_out, ", %d", i.crt.op2);
+ }
+}
+
+
+void dump_msr(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "msr%s\t%spsr_, ", cond[i.generic.cond],
+	                (i.msr.all.sel == 0) ? "c" : "s");
+ if (i.msr.all.type == 0) {
+ /* reg */
+ fprintf(dis->dis_out, "%s, ", msr_fld[i.msr.all.fld]);
+ dump_reg(dis, i.msr.all.rm);
+ } else {
+ /* imm */
+		fprintf(dis->dis_out, "f, #%d", ARM_SCALE(i.msr.op2_imm.imm, (i.msr.op2_imm.rot << 1)));
+ }
+}
+
+
+void dump_mrs(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "mrs%s\t", cond[i.generic.cond]);
+ dump_reg(dis, i.mrs.rd);
+	fprintf(dis->dis_out, ", %spsr", (i.mrs.sel == 0) ? "c" : "s");
+}
+
+
+void dump_swi(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "swi%s\t%d", cond[i.generic.cond], i.swi.num);
+}
+
+
+void dump_clz(ARMDis* dis, ARMInstr i) {
+ fprintf(dis->dis_out, "clz\t");
+ dump_reg(dis, i.clz.rd);
+ fprintf(dis->dis_out, ", ");
+ dump_reg(dis, i.clz.rm);
+ fprintf(dis->dis_out, "\n");
+}
+
+
+
+void armdis_decode(ARMDis* dis, void* p, int size) {
+ int i;
+ arminstr_t* pi = (arminstr_t*)p;
+ ARMInstr instr;
+
+ if (dis == NULL) return;
+
+ chk_out(dis);
+
+ size/=sizeof(arminstr_t);
+
+ for (i=0; i<size; ++i) {
+ fprintf(dis->dis_out, "%p:\t%08x\t", pi, *pi);
+ dis->pi = pi;
+ instr.raw = *pi++;
+
+ if ((instr.raw & ARM_BR_MASK) == ARM_BR_TAG) {
+ dump_br(dis, instr);
+ } else if ((instr.raw & ARM_SWP_MASK) == ARM_SWP_TAG) {
+ dump_swp(dis, instr);
+ } else if ((instr.raw & ARM_MUL_MASK) == ARM_MUL_TAG) {
+ dump_mul(dis, instr);
+ } else if ((instr.raw & ARM_CLZ_MASK) == ARM_CLZ_TAG) {
+ dump_clz(dis, instr);
+ } else if ((instr.raw & ARM_WXFER_MASK) == ARM_WXFER_TAG) {
+ dump_wxfer(dis, instr);
+ } else if ((instr.raw & ARM_HXFER_MASK) == ARM_HXFER_TAG) {
+ dump_hxfer(dis, instr);
+ } else if ((instr.raw & ARM_DPI_MASK) == ARM_DPI_TAG) {
+ dump_dpi(dis, instr);
+ } else if ((instr.raw & ARM_MRT_MASK) == ARM_MRT_TAG) {
+ dump_mrt(dis, instr);
+ } else if ((instr.raw & ARM_CDP_MASK) == ARM_CDP_TAG) {
+ dump_cdp(dis, instr);
+ } else if ((instr.raw & ARM_CDT_MASK) == ARM_CDT_TAG) {
+ dump_cdt(dis, instr);
+ } else if ((instr.raw & ARM_CRT_MASK) == ARM_CRT_TAG) {
+ dump_crt(dis, instr);
+ } else if ((instr.raw & ARM_MSR_MASK) == ARM_MSR_TAG) {
+ dump_msr(dis, instr);
+ } else if ((instr.raw & ARM_MRS_MASK) == ARM_MRS_TAG) {
+ dump_mrs(dis, instr);
+ } else if ((instr.raw & ARM_SWI_MASK) == ARM_SWI_TAG) {
+ dump_swi(dis, instr);
+ } else {
+ fprintf(dis->dis_out, "DCD 0x%x\t; <unknown>", instr.raw);
+ }
+
+ fprintf(dis->dis_out, "\n");
+ }
+}
+
+
+void armdis_open(ARMDis* dis, const char* dump_name) {
+ if (dis != NULL && dump_name != NULL) {
+ armdis_set_output(dis, fopen(dump_name, "w"));
+ }
+}
+
+
+void armdis_close(ARMDis* dis) {
+ if (dis->dis_out != NULL && dis->dis_out != stdout && dis->dis_out != stderr) {
+ fclose(dis->dis_out);
+ dis->dis_out = NULL;
+ }
+}
+
+
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size) {
+ armdis_open(dis, dump_name);
+ armdis_decode(dis, p, size);
+ armdis_close(dis);
+}
+
+
+void armdis_init(ARMDis* dis) {
+ if (dis != NULL) {
+ /* set to stdout */
+ armdis_set_output(dis, NULL);
+ }
+}
+
+
+
+
+void init_gdisasm() {
+ if (gdisasm == NULL) {
+ gdisasm = (ARMDis*)malloc(sizeof(ARMDis));
+ armdis_init(gdisasm);
+ }
+}
+
+void _armdis_set_output(FILE* f) {
+ init_gdisasm();
+ armdis_set_output(gdisasm, f);
+}
+
+FILE* _armdis_get_output() {
+ init_gdisasm();
+ return armdis_get_output(gdisasm);
+}
+
+void _armdis_decode(void* p, int size) {
+ init_gdisasm();
+ armdis_decode(gdisasm, p, size);
+}
+
+void _armdis_open(const char* dump_name) {
+ init_gdisasm();
+ armdis_open(gdisasm, dump_name);
+}
+
+void _armdis_close() {
+ init_gdisasm();
+ armdis_close(gdisasm);
+}
+
+void _armdis_dump(const char* dump_name, void* p, int size) {
+ init_gdisasm();
+ armdis_dump(gdisasm, dump_name, p, size);
+}
+
diff --git a/src/arch/arm/arm-dis.h b/src/arch/arm/arm-dis.h
new file mode 100644
index 0000000..8019499
--- /dev/null
+++ b/src/arch/arm/arm-dis.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ */
+
+#ifndef ARM_DIS
+#define ARM_DIS
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct _ARMDis {
+ FILE* dis_out;
+ void* pi;
+} ARMDis;
+
+
+void _armdis_set_output(FILE* f);
+FILE* _armdis_get_output(void);
+void _armdis_decode(void* p, int size);
+void _armdis_open(const char* dump_name);
+void _armdis_close(void);
+void _armdis_dump(const char* dump_name, void* p, int size);
+
+
+void armdis_init(ARMDis* dis);
+void armdis_set_output(ARMDis* dis, FILE* f);
+FILE* armdis_get_output(ARMDis* dis);
+void armdis_decode(ARMDis* dis, void* p, int size);
+void armdis_open(ARMDis* dis, const char* dump_name);
+void armdis_close(ARMDis* dis);
+void armdis_dump(ARMDis* dis, const char* dump_name, void* p, int size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* ARM_DIS */
diff --git a/src/arch/arm/arm-vfp-codegen.h b/src/arch/arm/arm-vfp-codegen.h
new file mode 100644
index 0000000..8b56b00
--- /dev/null
+++ b/src/arch/arm/arm-vfp-codegen.h
@@ -0,0 +1,247 @@
+//
+// Copyright 2011 Xamarin Inc
+//
+
+#ifndef __MONO_ARM_VFP_CODEGEN_H__
+#define __MONO_ARM_VFP_CODEGEN_H__
+
+#include "arm-codegen.h"
+
+enum {
+ /* VFP registers */
+ ARM_VFP_F0,
+ ARM_VFP_F1,
+ ARM_VFP_F2,
+ ARM_VFP_F3,
+ ARM_VFP_F4,
+ ARM_VFP_F5,
+ ARM_VFP_F6,
+ ARM_VFP_F7,
+ ARM_VFP_F8,
+ ARM_VFP_F9,
+ ARM_VFP_F10,
+ ARM_VFP_F11,
+ ARM_VFP_F12,
+ ARM_VFP_F13,
+ ARM_VFP_F14,
+ ARM_VFP_F15,
+ ARM_VFP_F16,
+ ARM_VFP_F17,
+ ARM_VFP_F18,
+ ARM_VFP_F19,
+ ARM_VFP_F20,
+ ARM_VFP_F21,
+ ARM_VFP_F22,
+ ARM_VFP_F23,
+ ARM_VFP_F24,
+ ARM_VFP_F25,
+ ARM_VFP_F26,
+ ARM_VFP_F27,
+ ARM_VFP_F28,
+ ARM_VFP_F29,
+ ARM_VFP_F30,
+ ARM_VFP_F31,
+
+ ARM_VFP_D0 = ARM_VFP_F0,
+ ARM_VFP_D1 = ARM_VFP_F2,
+ ARM_VFP_D2 = ARM_VFP_F4,
+ ARM_VFP_D3 = ARM_VFP_F6,
+ ARM_VFP_D4 = ARM_VFP_F8,
+ ARM_VFP_D5 = ARM_VFP_F10,
+ ARM_VFP_D6 = ARM_VFP_F12,
+ ARM_VFP_D7 = ARM_VFP_F14,
+ ARM_VFP_D8 = ARM_VFP_F16,
+ ARM_VFP_D9 = ARM_VFP_F18,
+ ARM_VFP_D10 = ARM_VFP_F20,
+ ARM_VFP_D11 = ARM_VFP_F22,
+ ARM_VFP_D12 = ARM_VFP_F24,
+ ARM_VFP_D13 = ARM_VFP_F26,
+ ARM_VFP_D14 = ARM_VFP_F28,
+ ARM_VFP_D15 = ARM_VFP_F30,
+
+ ARM_VFP_COPROC_SINGLE = 10,
+ ARM_VFP_COPROC_DOUBLE = 11,
+
+#define ARM_VFP_OP(p,q,r,s) (((p) << 23) | ((q) << 21) | ((r) << 20) | ((s) << 6))
+#define ARM_VFP_OP2(Fn,N) (ARM_VFP_OP (1,1,1,1) | ((Fn) << 16) | ((N) << 7))
+
+ ARM_VFP_MUL = ARM_VFP_OP (0,1,0,0),
+ ARM_VFP_NMUL = ARM_VFP_OP (0,1,0,1),
+ ARM_VFP_ADD = ARM_VFP_OP (0,1,1,0),
+ ARM_VFP_SUB = ARM_VFP_OP (0,1,1,1),
+ ARM_VFP_DIV = ARM_VFP_OP (1,0,0,0),
+
+ ARM_VFP_CPY = ARM_VFP_OP2 (0,0),
+ ARM_VFP_ABS = ARM_VFP_OP2 (0,1),
+ ARM_VFP_NEG = ARM_VFP_OP2 (1,0),
+ ARM_VFP_SQRT = ARM_VFP_OP2 (1,1),
+ ARM_VFP_CMP = ARM_VFP_OP2 (4,0),
+ ARM_VFP_CMPE = ARM_VFP_OP2 (4,1),
+ ARM_VFP_CMPZ = ARM_VFP_OP2 (5,0),
+ ARM_VFP_CMPEZ = ARM_VFP_OP2 (5,1),
+ ARM_VFP_CVT = ARM_VFP_OP2 (7,1),
+ ARM_VFP_UITO = ARM_VFP_OP2 (8,0),
+ ARM_VFP_SITO = ARM_VFP_OP2 (8,1),
+ ARM_VFP_TOUI = ARM_VFP_OP2 (12,0),
+ ARM_VFP_TOSI = ARM_VFP_OP2 (13,0),
+ ARM_VFP_TOUIZ = ARM_VFP_OP2 (12,1),
+ ARM_VFP_TOSIZ = ARM_VFP_OP2 (13,1),
+
+ ARM_VFP_SID = 0,
+ ARM_VFP_SCR = 1 << 1,
+ ARM_VFP_EXC = 8 << 1
+};
+
+#define ARM_DEF_VFP_DYADIC(cond,cp,op,Fd,Fn,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_MONADIC(cond,cp,op,Fd,Fm) \
+ (14 << 24) | \
+ ((cp) << 8) | \
+ (op) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ (((Fm) >> 1) << 0) | \
+ (((Fm) & 1) << 5) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_LSF(cond,cp,post,ls,wback,basereg,Fd,offset) \
+ ((offset) >= 0? (offset)>>2: -(offset)>>2) | \
+ (6 << 25) | \
+ ((cp) << 8) | \
+ (((Fd) >> 1) << 12) | \
+ (((Fd) & 1) << 22) | \
+ ((basereg) << 16) | \
+ ((ls) << 20) | \
+ ((wback) << 21) | \
+ (((offset) >= 0) << 23) | \
+ ((wback) << 21) | \
+ ((post) << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_DEF_VFP_CPT(cond,cp,op,L,Fn,Rd) \
+ (14 << 24) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((op) << 21) | \
+ ((L) << 20) | \
+ ((Rd) << 12) | \
+ (((Fn) >> 1) << 16) | \
+ (((Fn) & 1) << 7) | \
+ ARM_DEF_COND(cond)
+
+/* FP load and stores */
+#define ARM_FLDS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDS(p,freg,base,offset) \
+ ARM_FLDS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_LDR,0,(base),(freg),(offset)))
+#define ARM_FLDD(p,freg,base,offset) \
+ ARM_FLDD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTS_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_SINGLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTS(p,freg,base,offset) \
+ ARM_FSTS_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FSTD_COND(p,freg,base,offset,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,1,ARMOP_STR,0,(base),(freg),(offset)))
+#define ARM_FSTD(p,freg,base,offset) \
+ ARM_FSTD_COND(p,freg,base,offset,ARMCOND_AL)
+
+#define ARM_FLDMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_LDR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FLDMD(p,first_reg,nregs,base) \
+ ARM_FLDMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#define ARM_FSTMD_COND(p,first_reg,nregs,base,cond) \
+ ARM_EMIT((p), ARM_DEF_VFP_LSF((cond),ARM_VFP_COPROC_DOUBLE,0,ARMOP_STR,0,(base),(first_reg),((nregs) * 2) << 2))
+
+#define ARM_FSTMD(p,first_reg,nregs,base) \
+ ARM_FSTMD_COND(p,first_reg,nregs,base,ARMCOND_AL)
+
+#include <mono/arch/arm/arm_vfpmacros.h>
+
+/* coprocessor register transfer */
+#define ARM_FMSR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,0,(freg),(reg)))
+#define ARM_FMRS(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,0,1,(freg),(reg)))
+
+#define ARM_FMDLR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,0,(freg),(reg)))
+#define ARM_FMRDL(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,0,1,(freg),(reg)))
+#define ARM_FMDHR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,0,(freg),(reg)))
+#define ARM_FMRDH(p,reg,freg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,1,1,(freg),(reg)))
+
+#define ARM_FMXR(p,freg,reg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,0,(freg),(reg)))
+#define ARM_FMRX(p,reg,fcreg) \
+ ARM_EMIT((p), ARM_DEF_VFP_CPT(ARMCOND_AL,ARM_VFP_COPROC_SINGLE,7,1,(fcreg),(reg)))
+
+#define ARM_FMSTAT(p) \
+ ARM_FMRX((p),ARMREG_R15,ARM_VFP_SCR)
+
+#define ARM_DEF_MCRR(cond,cp,rn,rd,Fm,M) \
+ ((Fm) << 0) | \
+ (1 << 4) | \
+ ((M) << 5) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((2) << 21) | \
+ (12 << 24) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMDRR(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_MCRR(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FMRRD(cond,cp,rn,rd,Dm,D) \
+ ((Dm) << 0) | \
+ (1 << 4) | \
+ ((cp) << 8) | \
+ ((rd) << 12) | \
+ ((rn) << 16) | \
+ ((0xc5) << 20) | \
+ ARM_DEF_COND(cond)
+
+#define ARM_FMRRD(p,rd,rn,dm) \
+ ARM_EMIT((p), ARM_DEF_FMRRD(ARMCOND_AL,ARM_VFP_COPROC_DOUBLE,(rn),(rd),(dm) >> 1, (dm) & 1))
+
+#define ARM_DEF_FUITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FUITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FUITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FUITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOS(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xa) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOS(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOS (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#define ARM_DEF_FSITOD(cond,Dd,D,Fm,M) ((cond) << 28) | ((0x1d) << 23) | ((D) << 22) | ((0x3) << 20) | ((8) << 16) | ((Dd) << 12) | ((0xb) << 8) | ((1) << 7) | ((1) << 6) | ((M) << 5) | ((Fm) << 0)
+
+#define ARM_FSITOD(p,dreg,sreg) \
+ ARM_EMIT((p), ARM_DEF_FSITOD (ARMCOND_AL, (dreg) >> 1, (dreg) & 1, (sreg) >> 1, (sreg) & 1))
+
+#endif /* __MONO_ARM_VFP_CODEGEN_H__ */
+
diff --git a/src/arch/arm/arm-wmmx.h b/src/arch/arm/arm-wmmx.h
new file mode 100644
index 0000000..427c4fc
--- /dev/null
+++ b/src/arch/arm/arm-wmmx.h
@@ -0,0 +1,177 @@
+/*
+ * ARM CodeGen
+ * XScale WirelessMMX extensions
+ * Copyright 2002 Wild West Software
+ */
+
+#ifndef __WMMX_H__
+#define __WMMX_H__ 1
+
+#if 0
+#include <arm-codegen.h>
+#endif
+
+#if defined(ARM_IASM)
+# define WM_ASM(_expr) ARM_IASM(_expr)
+#else
+# define WM_ASM(_expr) __emit (_expr)
+#endif
+
+#if defined(ARM_EMIT)
+# define WM_EMIT(p, i) ARM_EMIT(p, i)
+#else
+# define WM_EMIT(p, i)
+#endif
+
+enum {
+ WM_CC_EQ = 0x0,
+ WM_CC_NE = 0x1,
+ WM_CC_CS = 0x2,
+ WM_CC_HS = WM_CC_CS,
+ WM_CC_CC = 0x3,
+ WM_CC_LO = WM_CC_CC,
+ WM_CC_MI = 0x4,
+ WM_CC_PL = 0x5,
+ WM_CC_VS = 0x6,
+ WM_CC_VC = 0x7,
+ WM_CC_HI = 0x8,
+ WM_CC_LS = 0x9,
+ WM_CC_GE = 0xA,
+ WM_CC_LT = 0xB,
+ WM_CC_GT = 0xC,
+ WM_CC_LE = 0xD,
+ WM_CC_AL = 0xE,
+ WM_CC_NV = 0xF,
+ WM_CC_SHIFT = 28
+};
+
+#if defined(ARM_DEF_COND)
+# define WM_DEF_CC(_cc) ARM_DEF_COND(_cc)
+#else
+# define WM_DEF_CC(_cc) ((_cc & 0xF) << WM_CC_SHIFT)
+#endif
+
+
+enum {
+ WM_R0 = 0x0,
+ WM_R1 = 0x1,
+ WM_R2 = 0x2,
+ WM_R3 = 0x3,
+ WM_R4 = 0x4,
+ WM_R5 = 0x5,
+ WM_R6 = 0x6,
+ WM_R7 = 0x7,
+ WM_R8 = 0x8,
+ WM_R9 = 0x9,
+ WM_R10 = 0xA,
+ WM_R11 = 0xB,
+ WM_R12 = 0xC,
+ WM_R13 = 0xD,
+ WM_R14 = 0xE,
+ WM_R15 = 0xF,
+
+ WM_wR0 = 0x0,
+ WM_wR1 = 0x1,
+ WM_wR2 = 0x2,
+ WM_wR3 = 0x3,
+ WM_wR4 = 0x4,
+ WM_wR5 = 0x5,
+ WM_wR6 = 0x6,
+ WM_wR7 = 0x7,
+ WM_wR8 = 0x8,
+ WM_wR9 = 0x9,
+ WM_wR10 = 0xA,
+ WM_wR11 = 0xB,
+ WM_wR12 = 0xC,
+ WM_wR13 = 0xD,
+ WM_wR14 = 0xE,
+ WM_wR15 = 0xF
+};
+
+
+/*
+ * Qualifiers:
+ * H - 16-bit (HalfWord) SIMD
+ * W - 32-bit (Word) SIMD
+ * D - 64-bit (Double)
+ */
+enum {
+ WM_B = 0,
+ WM_H = 1,
+ WM_D = 2
+};
+
+/*
+ * B.2.3 Transfers From Coprocessor Register (MRC)
+ * Table B-5
+ */
+enum {
+ WM_TMRC_OP2 = 0,
+ WM_TMRC_CPNUM = 1,
+
+ WM_TMOVMSK_OP2 = 1,
+ WM_TMOVMSK_CPNUM = 0,
+
+ WM_TANDC_OP2 = 1,
+ WM_TANDC_CPNUM = 1,
+
+ WM_TORC_OP2 = 2,
+ WM_TORC_CPNUM = 1,
+
+ WM_TEXTRC_OP2 = 3,
+ WM_TEXTRC_CPNUM = 1,
+
+ WM_TEXTRM_OP2 = 3,
+ WM_TEXTRM_CPNUM = 0
+};
+
+
+/*
+ * TANDC<B,H,W>{Cond} R15
+ * Performs AND across the fields of the SIMD PSR register (wCASF) and sends the result
+ * to CPSR; can be performed after a Byte, Half-word or Word operation that sets the flags.
+ * NOTE: R15 is omitted from the macro declaration;
+ */
+#define DEF_WM_TNADC_CC(_q, _cc) WM_DEF_CC((_cc)) + ((_q) << 0x16) + 0xE13F130
+
+#define _WM_TNADC_CC(_q, _cc) WM_ASM(DEF_WM_TNADC_CC(_q, _cc))
+#define ARM_WM_TNADC_CC(_p, _q, _cc) WM_EMIT(_p, DEF_WM_TNADC_CC(_q, _cc))
+
+/* inline assembly */
+#define _WM_TNADC(_q) _WM_TNADC_CC((_q), WM_CC_AL)
+#define _WM_TNADCB() _WM_TNADC(WM_B)
+#define _WM_TNADCH() _WM_TNADC(WM_H)
+#define _WM_TNADCD() _WM_TNADC(WM_D)
+
+/* codegen */
+#define ARM_WM_TNADC(_p, _q) ARM_WM_TNADC_CC((_p), (_q), WM_CC_AL)
+#define ARM_WM_TNADCB(_p) ARM_WM_TNADC(_p, WM_B)
+#define ARM_WM_TNADCH(_p) ARM_WM_TNADC(_p, WM_H)
+#define ARM_WM_TNADCD(_p) ARM_WM_TNADC(_p, WM_D)
+
+
+/*
+ * TBCST<B,H,W>{Cond} wRd, Rn
+ * Broadcasts a value from the ARM Source reg (Rn) to every SIMD position
+ * in the WMMX Destination reg (wRd).
+ */
+#define DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn) \
+ WM_DEF_CC((_cc)) + ((_q) << 6) + ((_wrd) << 16) + ((_rn) << 12) + 0xE200010
+
+#define _WM_TBCST_CC(_q, _cc, _wrd, _rn) WM_ASM(DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+#define ARM_WM_TBCST_CC(_p, _q, _cc, _wrd, _rn) WM_EMIT(_p, DEF_WM_TBCST_CC(_q, _cc, _wrd, _rn))
+
+/* inline */
+#define _WM_TBCST(_q, _wrd, _rn) _WM_TBCST_CC(_q, WM_CC_AL, _wrd, _rn)
+#define _WM_TBCSTB(_wrd, _rn) _WM_TBCST(WM_B)
+#define _WM_TBCSTH(_wrd, _rn) _WM_TBCST(WM_H)
+#define _WM_TBCSTD(_wrd, _rn) _WM_TBCST(WM_D)
+
+/* codegen */
+#define ARM_WM_TBCST(_p, _q, _wrd, _rn) ARM_WM_TBCST_CC(_p, _q, WM_CC_AL, _wrd, _rn)
+#define ARM_WM_TBCSTB(_p, _wrd, _rn) _WM_TBCST(_p, WM_B)
+#define ARM_WM_TBCSTH(_p, _wrd, _rn) _WM_TBCST(_p, WM_H)
+#define ARM_WM_TBCSTD(_p, _wrd, _rn) _WM_TBCST(_p, WM_D)
+
+
+#endif /* __WMMX_H__ */
diff --git a/src/arch/arm/cmp_macros.th b/src/arch/arm/cmp_macros.th
new file mode 100644
index 0000000..cb2639d
--- /dev/null
+++ b/src/arch/arm/cmp_macros.th
@@ -0,0 +1,56 @@
+/* PSR := <Op> Rn, (imm8 ROR 2*rot) */
+#define ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, 0, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rn, imm8) \
+ _<Op>_REG_IMM8_COND(rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, Rm */
+#define ARM_<Op>_REG_REG_COND(p, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, 0, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, 0, rn, rm, cond)
+#define _<Op>_REG_REG(rn, rm) \
+ _<Op>_REG_REG_COND(rn, rm, ARMCOND_AL)
+#endif
+
+
+/* PSR := <Op> Rn, (Rm <shift_type> imm8) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, 0, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpi_macros.th b/src/arch/arm/dpi_macros.th
new file mode 100644
index 0000000..be43d1f
--- /dev/null
+++ b/src/arch/arm/dpi_macros.th
@@ -0,0 +1,112 @@
+/* -- <Op> -- */
+
+/* Rd := Rn <Op> (imm8 ROR rot) ; rot is power of 2 */
+#define ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, rd, rn, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#define _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, rd, rn, imm8, rot, cond)
+#define _<Op>S_REG_IMM(rd, rn, imm8, rot) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, cond) \
+ ARM_<Op>S_REG_IMM_COND(p, rd, rn, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, rd, rn, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, rd, rn, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>_REG_IMM8(rd, rn, imm8) \
+ _<Op>_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#define _<Op>S_REG_IMM8_COND(rd, rn, imm8, cond) \
+ _<Op>S_REG_IMM_COND(rd, rn, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(rd, rn, imm8) \
+ _<Op>S_REG_IMM8_COND(rd, rn, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+#define ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, rn, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rn, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rn, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>_REG_REG(rd, rn, rm) \
+ _<Op>_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#define _<Op>S_REG_REG_COND(rd, rn, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, rn, rm, cond)
+#define _<Op>S_REG_REG(rd, rn, rm) \
+ _<Op>S_REG_REG_COND(rd, rn, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> imm_shift) */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rn, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rn, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rn, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rn <Op> (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_t, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, rn, rm, shift_t, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rn, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rn, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_t, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#define _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, rn, rm, shift_t, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rn, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rn, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/dpiops.sh b/src/arch/arm/dpiops.sh
new file mode 100755
index 0000000..d3b93ff
--- /dev/null
+++ b/src/arch/arm/dpiops.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+
+OPCODES="AND EOR SUB RSB ADD ADC SBC RSC ORR BIC"
+CMP_OPCODES="TST TEQ CMP CMN"
+MOV_OPCODES="MOV MVN"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
+
+
+
+echo -e "/* Macros for DPI ops, auto-generated from template */\n"
+
+echo -e "\n/* mov/mvn */\n"
+gen "$MOV_OPCODES" mov_macros
+
+echo -e "\n/* DPIs, arithmetic and logical */\n"
+gen "$OPCODES" dpi_macros
+
+echo -e "\n\n"
+
+echo -e "\n/* DPIs, comparison */\n"
+gen "$CMP_OPCODES" cmp_macros
+
+echo -e "\n/* end generated */\n"
diff --git a/src/arch/arm/mov_macros.th b/src/arch/arm/mov_macros.th
new file mode 100644
index 0000000..6bac290
--- /dev/null
+++ b/src/arch/arm/mov_macros.th
@@ -0,0 +1,121 @@
+/* Rd := imm8 ROR rot */
+#define ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define ARM_<Op>S_REG_IMM(p, reg, imm8, rot) \
+ ARM_<Op>S_REG_IMM_COND(p, reg, imm8, rot, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>_REG_IMM(reg, imm8, rot) \
+ _<Op>_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM_COND(reg, imm8, rot, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, rot, cond)
+#define _<Op>S_REG_IMM(reg, imm8, rot) \
+ _<Op>S_REG_IMM_COND(reg, imm8, rot, ARMCOND_AL)
+#endif
+
+
+/* Rd := imm8 */
+#define ARM_<Op>_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, cond) \
+ ARM_DPIOP_S_REG_IMM8ROT_COND(p, ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define ARM_<Op>S_REG_IMM8(p, reg, imm8) \
+ ARM_<Op>S_REG_IMM8_COND(p, reg, imm8, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>_REG_IMM8(reg, imm8) \
+ _<Op>_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMM8_COND(reg, imm8, cond) \
+ ARM_IASM_DPIOP_S_REG_IMM8ROT_COND(ARMOP_<Op>, reg, 0, imm8, 0, cond)
+#define _<Op>S_REG_IMM8(reg, imm8) \
+ _<Op>S_REG_IMM8_COND(reg, imm8, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm */
+#define ARM_<Op>_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>_REG_REG(p, rd, rm) \
+ ARM_<Op>_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REG_COND(p, rd, rm, cond) \
+ ARM_DPIOP_S_REG_REG_COND(p, ARMOP_<Op>, rd, 0, rm, cond)
+#define ARM_<Op>S_REG_REG(p, rd, rm) \
+ ARM_<Op>S_REG_REG_COND(p, rd, rm, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>_REG_REG(rd, rm) \
+ _<Op>_REG_REG_COND(rd, rm, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REG_COND(rd, rm, cond) \
+ ARM_IASM_DPIOP_S_REG_REG_COND(ARMOP_<Op>, rd, 0, rm, cond)
+#define _<Op>S_REG_REG(rd, rm) \
+ _<Op>S_REG_REG_COND(rd, rm, ARMCOND_AL)
+#endif
+
+
+/* Rd := Rm <shift_type> imm_shift */
+#define ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, cond) \
+ ARM_DPIOP_S_REG_IMMSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define ARM_<Op>S_REG_IMMSHIFT(p, rd, rm, shift_type, imm_shift) \
+ ARM_<Op>S_REG_IMMSHIFT_COND(p, rd, rm, shift_type, imm_shift, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, cond) \
+ ARM_IASM_DPIOP_S_REG_IMMSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, imm_shift, cond)
+#define _<Op>S_REG_IMMSHIFT(rd, rm, shift_type, imm_shift) \
+ _<Op>S_REG_IMMSHIFT_COND(rd, rm, shift_type, imm_shift, ARMCOND_AL)
+#endif
+
+
+
+/* Rd := (Rm <shift_type> Rs) */
+#define ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, cond) \
+ ARM_DPIOP_S_REG_REGSHIFT_COND(p, ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define ARM_<Op>S_REG_REGSHIFT(p, rd, rm, shift_type, rs) \
+ ARM_<Op>S_REG_REGSHIFT_COND(p, rd, rm, shift_type, rs, ARMCOND_AL)
+
+#ifndef ARM_NOIASM
+#define _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+/* S */
+#define _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, cond) \
+ ARM_IASM_DPIOP_S_REG_REGSHIFT_COND(ARMOP_<Op>, rd, 0, rm, shift_type, rs, cond)
+#define _<Op>S_REG_REGSHIFT(rd, rm, shift_type, rs) \
+ _<Op>S_REG_REGSHIFT_COND(rd, rm, shift_type, rs, ARMCOND_AL)
+#endif
+
+
diff --git a/src/arch/arm/tramp.c b/src/arch/arm/tramp.c
new file mode 100644
index 0000000..f736c7a
--- /dev/null
+++ b/src/arch/arm/tramp.c
@@ -0,0 +1,710 @@
+/*
+ * Create trampolines to invoke arbitrary functions.
+ * Copyright (c) 2002 Sergey Chaban <serge@wildwestsoftware.com>
+ *
+ * Contributions by Malte Hildingson
+ */
+
+#include "arm-codegen.h"
+#include "arm-dis.h"
+
+#if defined(_WIN32_WCE) || defined (UNDER_CE)
+# include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/mman.h>
+#endif
+
+#if !defined(PLATFORM_MACOSX)
+#include <errno.h>
+
+#include "mono/metadata/class.h"
+#include "mono/metadata/tabledefs.h"
+#include "mono/interpreter/interp.h"
+#include "mono/metadata/appdomain.h"
+
+
+#if 0
+# define ARM_DUMP_DISASM 1
+#endif
+
+/* prototypes for private functions (to avoid compiler warnings) */
+void flush_icache (void);
+void* alloc_code_buff (int num_instr);
+
+
+
+/*
+ * The resulting function takes the form:
+ * void func (void (*callme)(), void *retval, void *this_obj, stackval *arguments);
+ * NOTE: all args passed in ARM registers (A1-A4),
+ * then copied to R4-R7 (see definitions below).
+ */
+
+#define REG_FUNC_ADDR ARMREG_R4
+#define REG_RETVAL ARMREG_R5
+#define REG_THIS ARMREG_R6
+#define REG_ARGP ARMREG_R7
+
+
+#define ARG_SIZE sizeof(stackval)
+
+
+
+
+void flush_icache ()
+{
+#if defined(_WIN32)
+ FlushInstructionCache(GetCurrentProcess(), NULL, 0);
+#else
+# if 0
+ asm ("mov r0, r0");
+ asm ("mov r0, #0");
+ asm ("mcr p15, 0, r0, c7, c7, 0");
+# else
+ /* TODO: use (movnv pc, rx) method */
+# endif
+#endif
+}
+
+
+void* alloc_code_buff (int num_instr)
+{
+ void* code_buff;
+ int code_size = num_instr * sizeof(arminstr_t);
+
+#if defined(_WIN32) || defined(UNDER_CE)
+ int old_prot = 0;
+
+ code_buff = malloc(code_size);
+ VirtualProtect(code_buff, code_size, PAGE_EXECUTE_READWRITE, &old_prot);
+#else
+ int page_size = sysconf(_SC_PAGESIZE);
+ int new_code_size;
+
+ new_code_size = code_size + page_size - 1;
+ code_buff = malloc(new_code_size);
+ code_buff = (void *) (((int) code_buff + page_size - 1) & ~(page_size - 1));
+
+ if (mprotect(code_buff, code_size, PROT_READ|PROT_WRITE|PROT_EXEC) != 0) {
+ g_critical (G_GNUC_PRETTY_FUNCTION
+ ": mprotect error: %s", g_strerror (errno));
+ }
+#endif
+
+ return code_buff;
+}
+
+
+/*
+ * Refer to ARM Procedure Call Standard (APCS) for more info.
+ */
+MonoPIFunc mono_arch_create_trampoline (MonoMethodSignature *sig, gboolean string_ctor)
+{
+ MonoType* param;
+ MonoPIFunc code_buff;
+ arminstr_t* p;
+ guint32 code_size, stack_size;
+ guint32 simple_type;
+ int i, hasthis, aregs, regc, stack_offs;
+ int this_loaded;
+ guchar reg_alloc [ARM_NUM_ARG_REGS];
+
+ /* pessimistic estimation for prologue/epilogue size */
+ code_size = 16 + 16;
+ /* push/pop work regs */
+ code_size += 2;
+ /* call */
+ code_size += 2;
+ /* handle retval */
+ code_size += 2;
+
+ stack_size = 0;
+ hasthis = sig->hasthis ? 1 : 0;
+
+ aregs = ARM_NUM_ARG_REGS - hasthis;
+
+ for (i = 0, regc = aregs; i < sig->param_count; ++i) {
+ param = sig->params [i];
+
+ /* keep track of argument sizes */
+ if (i < ARM_NUM_ARG_REGS) reg_alloc [i] = 0;
+
+ if (param->byref) {
+ if (regc > 0) {
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ code_size += 2;
+ stack_size += sizeof(gpointer);
+ }
+ } else {
+ simple_type = param->type;
+enum_calc_size:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ /* keep track of argument sizes */
+ if (regc > 1) {
+ /* fits into registers, two LDRs */
+ code_size += 2;
+ reg_alloc [i] = regc;
+ regc -= 2;
+ } else if (regc > 0) {
+ /* first half fits into register, one LDR */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ /* the rest on the stack, LDR/STR */
+ code_size += 2;
+ stack_size += 4;
+ } else {
+ /* stack arg, 4 instrs - 2x(LDR/STR) */
+ code_size += 4;
+ stack_size += 2 * 4;
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_calc_size;
+ }
+
+ if (mono_class_value_size(param->data.klass, NULL) != 4) {
+ g_error("can only marshal enums, not generic structures (size: %d)", mono_class_value_size(param->data.klass, NULL));
+ }
+ if (regc > 0) {
+ /* register arg */
+ code_size += 1;
+ reg_alloc [i] = regc;
+ --regc;
+ } else {
+ /* stack arg */
+ code_size += 2;
+ stack_size += 4;
+ }
+ break;
+ default :
+ break;
+ }
+ }
+ }
+
+ code_buff = (MonoPIFunc)alloc_code_buff(code_size);
+ p = (arminstr_t*)code_buff;
+
+ /* prologue */
+ p = arm_emit_lean_prologue(p, stack_size,
+ /* save workset (r4-r7) */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+
+ /* copy args into workset */
+ /* callme - always present */
+ ARM_MOV_REG_REG(p, ARMREG_R4, ARMREG_A1);
+ /* retval */
+ if (sig->ret->byref || string_ctor || (sig->ret->type != MONO_TYPE_VOID)) {
+ ARM_MOV_REG_REG(p, ARMREG_R5, ARMREG_A2);
+ }
+ /* this_obj */
+ if (sig->hasthis) {
+ this_loaded = 0;
+ if (stack_size == 0) {
+ ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_A3);
+ this_loaded = 1;
+ } else {
+ ARM_MOV_REG_REG(p, ARMREG_R6, ARMREG_A3);
+ }
+ }
+ /* args */
+ if (sig->param_count != 0) {
+ ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_A4);
+ }
+
+ stack_offs = stack_size;
+
+ /* handle arguments */
+ /* in reverse order so we could use r0 (arg1) for memory transfers */
+ for (i = sig->param_count; --i >= 0;) {
+ param = sig->params [i];
+ if (param->byref) {
+ if (i < aregs && reg_alloc[i] > 0) {
+ ARM_LDR_IMM(p, ARMREG_A1 + i, REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ } else {
+ simple_type = param->type;
+enum_marshal:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_PTR:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_STRING:
+ if (i < aregs && reg_alloc [i] > 0) {
+ /* pass in register */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ break;
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ if (i < aregs && reg_alloc [i] > 0) {
+ if (reg_alloc [i] > 1) {
+ /* pass in registers */
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]) + 1, REG_ARGP, i*ARG_SIZE + 4);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_A1 + hasthis + (aregs - reg_alloc [i]), REG_ARGP, i*ARG_SIZE);
+ }
+ } else {
+ /* two words transferred on the stack */
+ stack_offs -= 2*sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i*ARG_SIZE + 4);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs + 4);
+ }
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (param->data.klass->enumtype) {
+ /* it's an enum value, proceed based on its base type */
+ simple_type = param->data.klass->enum_basetype->type;
+ goto enum_marshal;
+ } else {
+ if (i < aregs && reg_alloc[i] > 0) {
+ int vtreg = ARMREG_A1 + hasthis +
+ hasthis + (aregs - reg_alloc[i]);
+ ARM_LDR_IMM(p, vtreg, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, vtreg, vtreg, 0);
+ } else {
+ stack_offs -= sizeof(armword_t);
+ ARM_LDR_IMM(p, ARMREG_R0, REG_ARGP, i * ARG_SIZE);
+ ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R0, 0);
+ ARM_STR_IMM(p, ARMREG_R0, ARMREG_SP, stack_offs);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (sig->hasthis && !this_loaded) {
+ /* [this] always passed in A1, regardless of sig->call_convention */
+ ARM_MOV_REG_REG(p, ARMREG_A1, REG_THIS);
+ }
+
+ /* call [func] */
+ ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+ ARM_MOV_REG_REG(p, ARMREG_PC, REG_FUNC_ADDR);
+
+ /* handle retval */
+ if (sig->ret->byref || string_ctor) {
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ } else {
+ simple_type = sig->ret->type;
+enum_retvalue:
+ switch (simple_type) {
+ case MONO_TYPE_BOOLEAN:
+ case MONO_TYPE_I1:
+ case MONO_TYPE_U1:
+ ARM_STRB_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ case MONO_TYPE_CHAR:
+ case MONO_TYPE_I2:
+ case MONO_TYPE_U2:
+ ARM_STRH_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 32-bit integer and integer-equivalent return value
+ * is returned in R0.
+ * Single-precision floating-point values are returned in R0.
+ */
+ case MONO_TYPE_I:
+ case MONO_TYPE_U:
+ case MONO_TYPE_I4:
+ case MONO_TYPE_U4:
+ case MONO_TYPE_R4:
+ case MONO_TYPE_OBJECT:
+ case MONO_TYPE_CLASS:
+ case MONO_TYPE_ARRAY:
+ case MONO_TYPE_SZARRAY:
+ case MONO_TYPE_STRING:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ break;
+ /*
+ * A 64-bit integer is returned in R0 and R1.
+ * Double-precision floating-point values are returned in R0 and R1.
+ */
+ case MONO_TYPE_I8:
+ case MONO_TYPE_U8:
+ case MONO_TYPE_R8:
+ ARM_STR_IMM(p, ARMREG_R0, REG_RETVAL, 0);
+ ARM_STR_IMM(p, ARMREG_R1, REG_RETVAL, 4);
+ break;
+ case MONO_TYPE_VALUETYPE:
+ if (sig->ret->data.klass->enumtype) {
+ simple_type = sig->ret->data.klass->enum_basetype->type;
+ goto enum_retvalue;
+ }
+ break;
+ case MONO_TYPE_VOID:
+ break;
+ default:
+ break;
+ }
+ }
+
+ p = arm_emit_std_epilogue(p, stack_size,
+ /* restore R4-R7 */
+ (1 << ARMREG_R4) | (1 << ARMREG_R5) | (1 << ARMREG_R6) | (1 << ARMREG_R7));
+
+ flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+ _armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+ return code_buff;
+}
+
+
+
+#define MINV_OFFS(member) G_STRUCT_OFFSET(MonoInvocation, member)
+
+
+
+/*
+ * Returns a pointer to a native function that can be used to
+ * call the specified method.
+ * The function created will receive the arguments according
+ * to the call convention specified in the method.
+ * This function works by creating a MonoInvocation structure,
+ * filling the fields in and calling ves_exec_method on it.
+ * Still need to figure out how to handle the exception stuff
+ * across the managed/unmanaged boundary.
+ */
+void* mono_arch_create_method_pointer (MonoMethod* method)
+{
+	MonoMethodSignature* sig;
+	guchar* p, * p_method, * p_stackval_from_data, * p_exec;
+	void* code_buff;
+	int i, stack_size, arg_pos, arg_add, stackval_pos, offs;
+	int areg, reg_args, shift, pos;
+	MonoJitInfo *ji;
+
+	code_buff = alloc_code_buff(128);
+	p = (guchar*)code_buff;
+
+	sig = method->signature;
+
+	/* Branch over the 4-word data area emitted next (magic tag, method
+	 * pointer, 2-entry call table) so execution resumes at the prologue. */
+	ARM_B(p, 3);
+
+	/* embed magic number followed by method pointer */
+	*p++ = 'M';
+	*p++ = 'o';
+	*p++ = 'n';
+	*p++ = 'o';
+	/* method ptr - read back later by mono_method_pointer_get () */
+	*(void**)p = method;
+	p_method = p;
+	p += 4;
+
+	/* call table - addresses loaded PC-relative by the code below */
+	*(void**)p = stackval_from_data;
+	p_stackval_from_data = p;
+	p += 4;
+	*(void**)p = ves_exec_method;
+	p_exec = p;
+	p += 4;
+
+	/* frame = invocation struct + one stackval per arg (+1 for retval)
+	 * + spill area for the incoming register arguments */
+	stack_size = sizeof(MonoInvocation) + ARG_SIZE*(sig->param_count + 1) + ARM_NUM_ARG_REGS*2*sizeof(armword_t);
+
+	/* prologue */
+	p = (guchar*)arm_emit_lean_prologue((arminstr_t*)p, stack_size,
+	    (1 << ARMREG_R4) |
+	    (1 << ARMREG_R5) |
+	    (1 << ARMREG_R6) |
+	    (1 << ARMREG_R7));
+
+	/* R7 - ptr to stack args */
+	ARM_MOV_REG_REG(p, ARMREG_R7, ARMREG_IP);
+
+	/*
+	 * Initialize MonoInvocation fields, first the ones known now.
+	 */
+	ARM_MOV_REG_IMM8(p, ARMREG_R4, 0);
+	ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex));
+	ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(ex_handler));
+	ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(parent));
+
+	/* Set the method pointer. */
+	/* PC-relative load from the embedded data area; the
+	 * sizeof(arminstr_t)*2 term compensates for ARM's PC reading as
+	 * the current instruction address + 8. */
+	ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, -(int)(p - p_method + sizeof(arminstr_t)*2));
+	ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(method));
+
+	if (sig->hasthis) {
+		/* [this] in A1 */
+		ARM_STR_IMM(p, ARMREG_A1, ARMREG_SP, MINV_OFFS(obj));
+	} else {
+		/* else set minv.obj to NULL (R4 still holds 0 here) */
+		ARM_STR_IMM(p, ARMREG_R4, ARMREG_SP, MINV_OFFS(obj));
+	}
+
+	/* copy args from registers to stack */
+	/* arg_pos starts negative: register args are spilled just below the
+	 * incoming stack-arg pointer (R7) so all args end up contiguous */
+	areg = ARMREG_A1 + sig->hasthis;
+	arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+	arg_add = 0;
+	for (i = 0; i < sig->param_count; ++i) {
+		if (areg >= ARM_NUM_ARG_REGS) break;
+		ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos);
+		++areg;
+		if (!sig->params[i]->byref) {
+			switch (sig->params[i]->type) {
+			case MONO_TYPE_I8:
+			case MONO_TYPE_U8:
+			case MONO_TYPE_R8:
+				/* 64-bit args occupy two words */
+				if (areg >= ARM_NUM_ARG_REGS) {
+					/* load second half of 64-bit arg */
+					ARM_LDR_IMM(p, ARMREG_R4, ARMREG_R7, 0);
+					ARM_STR_IMM(p, ARMREG_R4, ARMREG_R7, arg_pos + sizeof(armword_t));
+					arg_add = sizeof(armword_t);
+				} else {
+					/* second half is already the register */
+					ARM_STR_IMM(p, areg, ARMREG_R7, arg_pos + sizeof(armword_t));
+					++areg;
+				}
+				break;
+			case MONO_TYPE_VALUETYPE:
+				/* assert */
+			default:
+				break;
+			}
+		}
+		arg_pos += 2 * sizeof(armword_t);
+	}
+	/* number of args passed in registers */
+	reg_args = i;
+
+
+
+	/*
+	 * Calc and save stack args ptr,
+	 * args follow MonoInvocation struct on the stack.
+	 */
+	ARM_ADD_REG_IMM8(p, ARMREG_R1, ARMREG_SP, sizeof(MonoInvocation));
+	ARM_STR_IMM(p, ARMREG_R1, ARMREG_SP, MINV_OFFS(stack_args));
+
+	/* convert method args to stackvals */
+	/* one stackval_from_data (A1=type, A2=result, A3=data) call per arg */
+	arg_pos = -(int)(ARM_NUM_ARG_REGS - sig->hasthis) * 2 * sizeof(armword_t);
+	stackval_pos = sizeof(MonoInvocation);
+	for (i = 0; i < sig->param_count; ++i) {
+		if (i < reg_args) {
+			/* arg was spilled below R7; arg_pos is negative here */
+			ARM_SUB_REG_IMM8(p, ARMREG_A3, ARMREG_R7, -arg_pos);
+			arg_pos += 2 * sizeof(armword_t);
+		} else {
+			if (arg_pos < 0) arg_pos = 0;
+			pos = arg_pos + arg_add;
+			if (pos <= 0xFF) {
+				ARM_ADD_REG_IMM8(p, ARMREG_A3, ARMREG_R7, pos);
+			} else {
+				if (is_arm_const((armword_t)pos)) {
+					shift = calc_arm_mov_const_shift((armword_t)pos);
+					ARM_ADD_REG_IMM(p, ARMREG_A3, ARMREG_R7, pos >> ((32 - shift) & 31), shift >> 1);
+				} else {
+					p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)pos);
+					/* NOTE(review): the sum is built in A2 here, while
+					 * every other path in this loop builds the data
+					 * pointer in A3 — looks like a typo (A3 intended);
+					 * confirm before relying on the large-offset path. */
+					ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_R7, ARMREG_R6);
+				}
+			}
+			arg_pos += sizeof(armword_t);
+			if (!sig->params[i]->byref) {
+				switch (sig->params[i]->type) {
+				case MONO_TYPE_I8:
+				case MONO_TYPE_U8:
+				case MONO_TYPE_R8:
+					/* 64-bit stack args consume an extra word */
+					arg_pos += sizeof(armword_t);
+					break;
+				case MONO_TYPE_VALUETYPE:
+					/* assert */
+				default:
+					break;
+				}
+			}
+		}
+
+		/* A2 = result */
+		if (stackval_pos <= 0xFF) {
+			ARM_ADD_REG_IMM8(p, ARMREG_A2, ARMREG_SP, stackval_pos);
+		} else {
+			if (is_arm_const((armword_t)stackval_pos)) {
+				shift = calc_arm_mov_const_shift((armword_t)stackval_pos);
+				ARM_ADD_REG_IMM(p, ARMREG_A2, ARMREG_SP, stackval_pos >> ((32 - shift) & 31), shift >> 1);
+			} else {
+				p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R6, (armword_t)stackval_pos);
+				ARM_ADD_REG_REG(p, ARMREG_A2, ARMREG_SP, ARMREG_R6);
+			}
+		}
+
+		/* A1 = type */
+		p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_A1, (armword_t)sig->params [i]);
+
+		stackval_pos += ARG_SIZE;
+
+		/* PC-relative load of stackval_from_data from the call table
+		 * (same PC+8 compensation as above) */
+		offs = -(p + 2*sizeof(arminstr_t) - p_stackval_from_data);
+		/* load function address */
+		ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+		/* call stackval_from_data */
+		ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+		ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+	}
+
+	/* store retval ptr */
+	p = (guchar*)arm_mov_reg_imm32((arminstr_t*)p, ARMREG_R5, (armword_t)stackval_pos);
+	/* NOTE(review): R4 holds the stackval_from_data address at this point
+	 * (loaded in the loop above); adding it to SP looks wrong — R5, just
+	 * loaded with stackval_pos, was probably intended. Confirm. */
+	ARM_ADD_REG_REG(p, ARMREG_R5, ARMREG_SP, ARMREG_R4);
+	ARM_STR_IMM(p, ARMREG_R5, ARMREG_SP, MINV_OFFS(retval));
+
+	/*
+	 * Call the method.
+	 */
+	/* A1 = MonoInvocation ptr */
+	ARM_MOV_REG_REG(p, ARMREG_A1, ARMREG_SP);
+	offs = -(p + 2*sizeof(arminstr_t) - p_exec);
+	/* load function address */
+	ARM_LDR_IMM(p, ARMREG_R4, ARMREG_PC, offs);
+	/* call ves_exec */
+	ARM_MOV_REG_REG(p, ARMREG_LR, ARMREG_PC);
+	ARM_MOV_REG_REG(p, ARMREG_PC, ARMREG_R4);
+
+
+	/*
+	 * Move retval into reg.
+	 * R5 is assumed to still hold the retval pointer across the call.
+	 */
+	if (sig->ret->byref) {
+		ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+	} else {
+		switch (sig->ret->type) {
+		case MONO_TYPE_BOOLEAN:
+		case MONO_TYPE_I1:
+		case MONO_TYPE_U1:
+			ARM_LDRB_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+			break;
+		case MONO_TYPE_CHAR:
+		case MONO_TYPE_I2:
+		case MONO_TYPE_U2:
+			ARM_LDRH_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+			break;
+		/* NOTE(review): MONO_TYPE_STRING and MONO_TYPE_PTR fall through
+		 * to default (no load) here, unlike the corresponding store path
+		 * in the trampoline above — confirm this is intended. */
+		case MONO_TYPE_I:
+		case MONO_TYPE_U:
+		case MONO_TYPE_I4:
+		case MONO_TYPE_U4:
+		case MONO_TYPE_R4:
+		case MONO_TYPE_OBJECT:
+		case MONO_TYPE_CLASS:
+		case MONO_TYPE_ARRAY:
+		case MONO_TYPE_SZARRAY:
+			ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+			break;
+		case MONO_TYPE_I8:
+		case MONO_TYPE_U8:
+		case MONO_TYPE_R8:
+			/* 64-bit / double results come back in R0:R1 */
+			ARM_LDR_IMM(p, ARMREG_R0, ARMREG_R5, 0);
+			ARM_LDR_IMM(p, ARMREG_R1, ARMREG_R5, 4);
+			break;
+		case MONO_TYPE_VOID:
+		default:
+			break;
+		}
+	}
+
+
+	p = (guchar*)arm_emit_std_epilogue((arminstr_t*)p, stack_size,
+	    (1 << ARMREG_R4) |
+	    (1 << ARMREG_R5) |
+	    (1 << ARMREG_R6) |
+	    (1 << ARMREG_R7));
+
+	flush_icache();
+
+#ifdef ARM_DUMP_DISASM
+	_armdis_decode((arminstr_t*)code_buff, ((guint8*)p) - ((guint8*)code_buff));
+#endif
+
+	/* register the generated code with the JIT info tables */
+	ji = g_new0(MonoJitInfo, 1);
+	ji->method = method;
+	ji->code_size = ((guint8 *) p) - ((guint8 *) code_buff);
+	ji->code_start = (gpointer) code_buff;
+
+	mono_jit_info_table_add(mono_get_root_domain (), ji);
+
+	return code_buff;
+}
+
+
+/*
+ * mono_create_method_pointer () will insert a pointer to the MonoMethod
+ * so that the interp can easily get at the data: this function will retrieve
+ * the method from the code stream.
+ */
+MonoMethod* mono_method_pointer_get (void* code)
+{
+	/* Layout written by mono_arch_create_method_pointer:
+	 * word 0 = branch over the data area, bytes 4-7 = "Mono" magic,
+	 * word 2 (offset 8) = the embedded MonoMethod pointer. */
+	unsigned char* c = code;
+	/* check out magic number that follows unconditional branch */
+	if (c[4] == 'M' &&
+	    c[5] == 'o' &&
+	    c[6] == 'n' &&
+	    c[7] == 'o') return ((MonoMethod**)code)[2];
+	return NULL;
+}
+#endif
diff --git a/src/arch/arm/vfp_macros.th b/src/arch/arm/vfp_macros.th
new file mode 100644
index 0000000..cca67dc
--- /dev/null
+++ b/src/arch/arm/vfp_macros.th
@@ -0,0 +1,15 @@
+/* -- <Op> -- */
+/* Template file: vfpops.sh instantiates it via sed, replacing <Op>
+ * with each dyadic VFP opcode name (ADD, SUB, MUL, NMUL, DIV). */
+
+
+/* Fd := Fn <Op> Fm */
+#define ARM_VFP_<Op>D_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,rd,rn,rm))
+/* double-precision, unconditional (ARMCOND_AL) */
+#define ARM_VFP_<Op>D(p, rd, rn, rm) \
+	ARM_VFP_<Op>D_COND(p, rd, rn, rm, ARMCOND_AL)
+
+/* single-precision variants */
+#define ARM_VFP_<Op>S_COND(p, rd, rn, rm, cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_DYADIC(cond,ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,rd,rn,rm))
+#define ARM_VFP_<Op>S(p, rd, rn, rm) \
+	ARM_VFP_<Op>S_COND(p, rd, rn, rm, ARMCOND_AL)
+
+
diff --git a/src/arch/arm/vfpm_macros.th b/src/arch/arm/vfpm_macros.th
new file mode 100644
index 0000000..25ad721
--- /dev/null
+++ b/src/arch/arm/vfpm_macros.th
@@ -0,0 +1,14 @@
+/* -- <Op> -- */
+/* Template file: vfpops.sh instantiates it via sed, replacing <Op>
+ * with each monadic VFP opcode name (CPY, ABS, NEG, SQRT, CMP, ...). */
+
+
+/* Fd := <Op> Fm */
+
+#define ARM_<Op>D_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_DOUBLE,ARM_VFP_<Op>,(dreg),(sreg)))
+/* double-precision, unconditional (ARMCOND_AL) */
+#define ARM_<Op>D(p,dreg,sreg) ARM_<Op>D_COND(p,dreg,sreg,ARMCOND_AL)
+
+/* single-precision variants */
+#define ARM_<Op>S_COND(p,dreg,sreg,cond) \
+	ARM_EMIT((p), ARM_DEF_VFP_MONADIC((cond),ARM_VFP_COPROC_SINGLE,ARM_VFP_<Op>,(dreg),(sreg)))
+#define ARM_<Op>S(p,dreg,sreg) ARM_<Op>S_COND(p,dreg,sreg,ARMCOND_AL)
+
+
diff --git a/src/arch/arm/vfpops.sh b/src/arch/arm/vfpops.sh
new file mode 100755
index 0000000..bed4a9c
--- /dev/null
+++ b/src/arch/arm/vfpops.sh
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+DYADIC="ADD SUB MUL NMUL DIV"
+MONADIC="CPY ABS NEG SQRT CMP CMPE CMPZ CMPEZ CVT UITO SITO TOUI TOSI TOUIZ TOSIZ"
+
+# $1: opcode list
+# $2: template
+gen() {
+ for i in $1; do
+ sed "s/<Op>/$i/g" $2.th
+ done
+}
+
+echo -e "/* Macros for VFP ops, auto-generated from template */\n"
+
+echo -e "\n/* dyadic */\n"
+gen "$DYADIC" vfp_macros
+
+echo -e "\n/* monadic */\n"
+gen "$MONADIC" vfpm_macros
+
+echo -e "\n\n"
+
+echo -e "\n/* end generated */\n"
diff --git a/src/arch/arm64/.gitignore b/src/arch/arm64/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/src/arch/arm64/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/src/arch/arm64/Makefile.am b/src/arch/arm64/Makefile.am
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/arch/arm64/Makefile.am
diff --git a/src/arch/arm64/arm64-codegen.h b/src/arch/arm64/arm64-codegen.h
new file mode 100644
index 0000000..259ff96
--- /dev/null
+++ b/src/arch/arm64/arm64-codegen.h
@@ -0,0 +1,3 @@
+#include "../../../../mono-extensions/mono/arch/arm64/arm64-codegen.h"
+
+
diff --git a/src/arch/ia64/.gitignore b/src/arch/ia64/.gitignore
new file mode 100644
index 0000000..b336cc7
--- /dev/null
+++ b/src/arch/ia64/.gitignore
@@ -0,0 +1,2 @@
+/Makefile
+/Makefile.in
diff --git a/src/arch/ia64/Makefile.am b/src/arch/ia64/Makefile.am
new file mode 100644
index 0000000..e03ea47
--- /dev/null
+++ b/src/arch/ia64/Makefile.am
@@ -0,0 +1,3 @@
+EXTRA_DIST = ia64-codegen.h
+
+
diff --git a/src/arch/ia64/codegen.c b/src/arch/ia64/codegen.c
new file mode 100644
index 0000000..97e1aef
--- /dev/null
+++ b/src/arch/ia64/codegen.c
@@ -0,0 +1,861 @@
+/*
+ * codegen.c: Tests for the IA64 code generation macros
+ */
+
+#include <glib.h>
+#include <stdio.h>
+#include <ctype.h>
+
+#define IA64_SIMPLE_EMIT_BUNDLE
+
+#include <mono/arch/ia64/ia64-codegen.h>
+
+/*
+ * Test helper: disassembles 'size' bytes of machine code at 'code' by
+ * writing them out as .byte directives under a label derived from 'id',
+ * assembling the file, and running objdump -d on the object (objdump
+ * prints the disassembly to stdout). Extra objdump arguments can be
+ * supplied via the MONO_OBJDUMP_ARGS environment variable.
+ */
+void
+mono_disassemble_code (guint8 *code, int size, char *id)
+{
+	int i;
+	FILE *ofd;
+	const char *tmp = g_get_tmp_dir ();
+	const char *objdump_args = g_getenv ("MONO_OBJDUMP_ARGS");
+	char *as_file;
+	char *o_file;
+	char *cmd;
+
+	/* NOTE(review): fixed file names in a shared tmp dir are racy if two
+	 * instances run concurrently (and symlink-attackable); g_mkstemp
+	 * would be safer. Acceptable for a local test tool — confirm. */
+	as_file = g_strdup_printf ("%s/test.s", tmp);
+
+	if (!(ofd = fopen (as_file, "w")))
+		g_assert_not_reached ();
+
+	/* emit a label from 'id', mapping non-alphanumerics to '_' so the
+	 * result is a valid assembler symbol */
+	for (i = 0; id [i]; ++i) {
+		if (!isalnum (id [i]))
+			fprintf (ofd, "_");
+		else
+			fprintf (ofd, "%c", id [i]);
+	}
+	fprintf (ofd, ":\n");
+
+	/* dump the code bytes as data; no instruction knowledge needed here */
+	for (i = 0; i < size; ++i)
+		fprintf (ofd, ".byte %d\n", (unsigned int) code [i]);
+
+	fclose (ofd);
+
+/* native tools on ia64, cross tools elsewhere */
+#ifdef __ia64__
+#define DIS_CMD "objdump -d"
+#define AS_CMD "as"
+#else
+#define DIS_CMD "ia64-linux-gnu-objdump -d"
+#define AS_CMD "ia64-linux-gnu-as"
+#endif
+
+	o_file = g_strdup_printf ("%s/test.o", tmp);
+	cmd = g_strdup_printf (AS_CMD " %s -o %s", as_file, o_file);
+	/* NOTE(review): system() return values are ignored throughout; a
+	 * missing (cross-)assembler fails silently. */
+	system (cmd);
+	g_free (cmd);
+	if (!objdump_args)
+		objdump_args = "";
+
+	cmd = g_strdup_printf (DIS_CMD " %s %s", objdump_args, o_file);
+	system (cmd);
+	g_free (cmd);
+
+	g_free (o_file);
+	g_free (as_file);
+}
+
+int
+main ()
+{
+ Ia64CodegenState code;
+
+ guint8 *buf = g_malloc0 (40960);
+
+ ia64_codegen_init (code, buf);
+
+ ia64_add (code, 1, 2, 3);
+ ia64_add1 (code, 1, 2, 3);
+ ia64_sub (code, 1, 2, 3);
+ ia64_sub1 (code, 1, 2, 3);
+ ia64_addp4 (code, 1, 2, 3);
+ ia64_and (code, 1, 2, 3);
+ ia64_andcm (code, 1, 2, 3);
+ ia64_or (code, 1, 2, 3);
+ ia64_xor (code, 1, 2, 3);
+ ia64_shladd (code, 1, 2, 3, 4);
+ ia64_shladdp4 (code, 1, 2, 3, 4);
+ ia64_sub_imm (code, 1, 0x7f, 2);
+ ia64_sub_imm (code, 1, -1, 2);
+ ia64_and_imm (code, 1, -128, 2);
+ ia64_andcm_imm (code, 1, -128, 2);
+ ia64_or_imm (code, 1, -128, 2);
+ ia64_xor_imm (code, 1, -128, 2);
+ ia64_adds_imm (code, 1, 8191, 2);
+ ia64_adds_imm (code, 1, -8192, 2);
+ ia64_adds_imm (code, 1, 1234, 2);
+ ia64_adds_imm (code, 1, -1234, 2);
+ ia64_addp4_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 1234, 2);
+ ia64_addl_imm (code, 1, -1234, 2);
+ ia64_addl_imm (code, 1, 2097151, 2);
+ ia64_addl_imm (code, 1, -2097152, 2);
+
+ ia64_cmp_lt (code, 1, 2, 1, 2);
+ ia64_cmp_ltu (code, 1, 2, 1, 2);
+ ia64_cmp_eq (code, 1, 2, 1, 2);
+ ia64_cmp_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp4_lt (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu (code, 1, 2, 1, 2);
+ ia64_cmp4_eq (code, 1, 2, 1, 2);
+ ia64_cmp4_lt_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_ltu_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_unc (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_and (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or (code, 1, 2, 1, 2);
+ ia64_cmp4_eq_or_andcm (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_and (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or (code, 1, 2, 1, 2);
+ ia64_cmp4_ne_or_andcm (code, 1, 2, 1, 2);
+
+ ia64_cmp_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_le_and (code, 1, 2, 0, 2);
+ ia64_cmp_le_or (code, 1, 2, 0, 2);
+ ia64_cmp_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp4_gt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_gt_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_le_and (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or (code, 1, 2, 0, 2);
+ ia64_cmp4_le_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_and (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or (code, 1, 2, 0, 2);
+ ia64_cmp4_ge_or_andcm (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_and (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or (code, 1, 2, 0, 2);
+ ia64_cmp4_lt_or_andcm (code, 1, 2, 0, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, 127, 2);
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_cmp4_lt_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_lt_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ltu_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_eq_unc_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_and_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_imm (code, 1, 2, -128, 2);
+ ia64_cmp4_ne_or_andcm_imm (code, 1, 2, -128, 2);
+
+ ia64_padd1 (code, 1, 2, 3);
+ ia64_padd2 (code, 1, 2, 3);
+ ia64_padd4 (code, 1, 2, 3);
+ ia64_padd1_sss (code, 1, 2, 3);
+ ia64_padd2_sss (code, 1, 2, 3);
+ ia64_padd1_uuu (code, 1, 2, 3);
+ ia64_padd2_uuu (code, 1, 2, 3);
+ ia64_padd1_uus (code, 1, 2, 3);
+ ia64_padd2_uus (code, 1, 2, 3);
+
+ ia64_psub1 (code, 1, 2, 3);
+ ia64_psub2 (code, 1, 2, 3);
+ ia64_psub4 (code, 1, 2, 3);
+ ia64_psub1_sss (code, 1, 2, 3);
+ ia64_psub2_sss (code, 1, 2, 3);
+ ia64_psub1_uuu (code, 1, 2, 3);
+ ia64_psub2_uuu (code, 1, 2, 3);
+ ia64_psub1_uus (code, 1, 2, 3);
+ ia64_psub2_uus (code, 1, 2, 3);
+
+ ia64_pavg1 (code, 1, 2, 3);
+ ia64_pavg2 (code, 1, 2, 3);
+ ia64_pavg1_raz (code, 1, 2, 3);
+ ia64_pavg2_raz (code, 1, 2, 3);
+ ia64_pavgsub1 (code, 1, 2, 3);
+ ia64_pavgsub2 (code, 1, 2, 3);
+ ia64_pcmp1_eq (code, 1, 2, 3);
+ ia64_pcmp2_eq (code, 1, 2, 3);
+ ia64_pcmp4_eq (code, 1, 2, 3);
+ ia64_pcmp1_gt (code, 1, 2, 3);
+ ia64_pcmp2_gt (code, 1, 2, 3);
+ ia64_pcmp4_gt (code, 1, 2, 3);
+
+ ia64_pshladd2 (code, 1, 2, 3, 4);
+ ia64_pshradd2 (code, 1, 2, 3, 4);
+
+ ia64_pmpyshr2 (code, 1, 2, 3, 0);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 0);
+ ia64_pmpyshr2 (code, 1, 2, 3, 7);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 7);
+ ia64_pmpyshr2 (code, 1, 2, 3, 15);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 15);
+ ia64_pmpyshr2 (code, 1, 2, 3, 16);
+ ia64_pmpyshr2_u (code, 1, 2, 3, 16);
+
+ ia64_pmpy2_r (code, 1, 2, 3);
+ ia64_pmpy2_l (code, 1, 2, 3);
+ ia64_mix1_r (code, 1, 2, 3);
+ ia64_mix2_r (code, 1, 2, 3);
+ ia64_mix4_r (code, 1, 2, 3);
+ ia64_mix1_l (code, 1, 2, 3);
+ ia64_mix2_l (code, 1, 2, 3);
+ ia64_mix4_l (code, 1, 2, 3);
+ ia64_pack2_uss (code, 1, 2, 3);
+ ia64_pack2_sss (code, 1, 2, 3);
+ ia64_pack4_sss (code, 1, 2, 3);
+ ia64_unpack1_h (code, 1, 2, 3);
+ ia64_unpack2_h (code, 1, 2, 3);
+ ia64_unpack4_h (code, 1, 2, 3);
+ ia64_unpack1_l (code, 1, 2, 3);
+ ia64_unpack2_l (code, 1, 2, 3);
+ ia64_unpack4_l (code, 1, 2, 3);
+ ia64_pmin1_u (code, 1, 2, 3);
+ ia64_pmax1_u (code, 1, 2, 3);
+ ia64_pmin2 (code, 1, 2, 3);
+ ia64_pmax2 (code, 1, 2, 3);
+ ia64_psad1 (code, 1, 2, 3);
+
+ ia64_mux1 (code, 1, 2, IA64_MUX1_BRCST);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_MIX);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_SHUF);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_ALT);
+ ia64_mux1 (code, 1, 2, IA64_MUX1_REV);
+
+ ia64_mux2 (code, 1, 2, 0x8d);
+
+ ia64_pshr2 (code, 1, 2, 3);
+ ia64_pshr4 (code, 1, 2, 3);
+ ia64_shr (code, 1, 2, 3);
+ ia64_pshr2_u (code, 1, 2, 3);
+ ia64_pshr4_u (code, 1, 2, 3);
+ ia64_shr_u (code, 1, 2, 3);
+
+ ia64_pshr2_imm (code, 1, 2, 20);
+ ia64_pshr4_imm (code, 1, 2, 20);
+ ia64_pshr2_u_imm (code, 1, 2, 20);
+ ia64_pshr4_u_imm (code, 1, 2, 20);
+
+ ia64_pshl2 (code, 1, 2, 3);
+ ia64_pshl4 (code, 1, 2, 3);
+ ia64_shl (code, 1, 2, 3);
+
+ ia64_pshl2_imm (code, 1, 2, 20);
+ ia64_pshl4_imm (code, 1, 2, 20);
+
+ ia64_popcnt (code, 1, 2);
+
+ ia64_shrp (code, 1, 2, 3, 62);
+
+ ia64_extr_u (code, 1, 2, 62, 61);
+ ia64_extr (code, 1, 2, 62, 61);
+
+ ia64_dep_z (code, 1, 2, 62, 61);
+
+ ia64_dep_z_imm (code, 1, 127, 62, 61);
+ ia64_dep_z_imm (code, 1, -128, 62, 61);
+ ia64_dep_imm (code, 1, 0, 2, 62, 61);
+ ia64_dep_imm (code, 1, -1, 2, 62, 61);
+ ia64_dep (code, 1, 2, 3, 10, 15);
+
+ ia64_tbit_z (code, 1, 2, 3, 0);
+
+ ia64_tbit_z (code, 1, 2, 3, 63);
+ ia64_tbit_z_unc (code, 1, 2, 3, 63);
+ ia64_tbit_z_and (code, 1, 2, 3, 63);
+ ia64_tbit_nz_and (code, 1, 2, 3, 63);
+ ia64_tbit_z_or (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or (code, 1, 2, 3, 63);
+ ia64_tbit_z_or_andcm (code, 1, 2, 3, 63);
+ ia64_tbit_nz_or_andcm (code, 1, 2, 3, 63);
+
+ ia64_tnat_z (code, 1, 2, 3);
+ ia64_tnat_z_unc (code, 1, 2, 3);
+ ia64_tnat_z_and (code, 1, 2, 3);
+ ia64_tnat_nz_and (code, 1, 2, 3);
+ ia64_tnat_z_or (code, 1, 2, 3);
+ ia64_tnat_nz_or (code, 1, 2, 3);
+ ia64_tnat_z_or_andcm (code, 1, 2, 3);
+ ia64_tnat_nz_or_andcm (code, 1, 2, 3);
+
+ ia64_nop_i (code, 0x1234);
+ ia64_hint_i (code, 0x1234);
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_chk_s_i (code, 1, 0);
+ ia64_chk_s_i (code, 1, -1);
+ ia64_chk_s_i (code, 1, 1);
+
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_SPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, 0);
+ ia64_mov_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_DPTK, IA64_BR_IH_IMP);
+ ia64_mov_ret_to_br_hint (code, 1, 1, -1, IA64_MOV_TO_BR_WH_NONE, 0);
+
+ ia64_mov_from_br (code, 1, 1);
+
+ ia64_mov_to_pred (code, 1, 0xfe);
+
+ ia64_mov_to_pred_rot_imm (code, 0xff0000);
+
+ ia64_mov_from_ip (code, 1);
+ ia64_mov_from_pred (code, 1);
+
+ ia64_mov_to_ar_i (code, 1, 1);
+
+ ia64_mov_to_ar_imm_i (code, 1, 127);
+
+ ia64_mov_from_ar_i (code, 1, 1);
+
+ ia64_zxt1 (code, 1, 2);
+ ia64_zxt2 (code, 1, 2);
+ ia64_zxt4 (code, 1, 2);
+ ia64_sxt1 (code, 1, 2);
+ ia64_sxt2 (code, 1, 2);
+ ia64_sxt4 (code, 1, 2);
+
+ ia64_czx1_l (code, 1, 2);
+ ia64_czx2_l (code, 1, 2);
+ ia64_czx1_r (code, 1, 2);
+ ia64_czx2_r (code, 1, 2);
+
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NONE);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NT1);
+ ia64_ld1_hint (code, 1, 2, IA64_LD_HINT_NTA);
+
+ ia64_ld1_hint (code, 1, 2, 0);
+ ia64_ld2_hint (code, 1, 2, 0);
+ ia64_ld4_hint (code, 1, 2, 0);
+ ia64_ld8_hint (code, 1, 2, 0);
+
+ ia64_ld1_s_hint (code, 1, 2, 0);
+ ia64_ld2_s_hint (code, 1, 2, 0);
+ ia64_ld4_s_hint (code, 1, 2, 0);
+ ia64_ld8_s_hint (code, 1, 2, 0);
+
+ ia64_ld1_a_hint (code, 1, 2, 0);
+ ia64_ld2_a_hint (code, 1, 2, 0);
+ ia64_ld4_a_hint (code, 1, 2, 0);
+ ia64_ld8_a_hint (code, 1, 2, 0);
+
+ ia64_ld1_sa_hint (code, 1, 2, 0);
+ ia64_ld2_sa_hint (code, 1, 2, 0);
+ ia64_ld4_sa_hint (code, 1, 2, 0);
+ ia64_ld8_sa_hint (code, 1, 2, 0);
+
+ ia64_ld1_bias_hint (code, 1, 2, 0);
+ ia64_ld2_bias_hint (code, 1, 2, 0);
+ ia64_ld4_bias_hint (code, 1, 2, 0);
+ ia64_ld8_bias_hint (code, 1, 2, 0);
+
+ ia64_ld1_inc_hint (code, 1, 2, 3, IA64_LD_HINT_NONE);
+
+ ia64_ld1_inc_imm_hint (code, 1, 2, 255, IA64_LD_HINT_NONE);
+ ia64_ld1_inc_imm_hint (code, 1, 2, -256, IA64_LD_HINT_NONE);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NTA);
+
+ ia64_st1_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st2_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st4_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st8_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st16_hint (code, 1, 2, IA64_ST_HINT_NONE);
+ ia64_st16_rel_hint (code, 1, 2, IA64_ST_HINT_NONE);
+
+ ia64_st1_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st1_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st2_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st4_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+ ia64_st8_rel_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_st8_spill_inc_imm_hint (code, 1, 2, 255, IA64_ST_HINT_NONE);
+
+ ia64_ldfs_hint (code, 1, 2, 0);
+ ia64_ldfd_hint (code, 1, 2, 0);
+ ia64_ldf8_hint (code, 1, 2, 0);
+ ia64_ldfe_hint (code, 1, 2, 0);
+
+ ia64_ldfs_s_hint (code, 1, 2, 0);
+ ia64_ldfd_s_hint (code, 1, 2, 0);
+ ia64_ldf8_s_hint (code, 1, 2, 0);
+ ia64_ldfe_s_hint (code, 1, 2, 0);
+
+ ia64_ldfs_a_hint (code, 1, 2, 0);
+ ia64_ldfd_a_hint (code, 1, 2, 0);
+ ia64_ldf8_a_hint (code, 1, 2, 0);
+ ia64_ldfe_a_hint (code, 1, 2, 0);
+
+ ia64_ldfs_sa_hint (code, 1, 2, 0);
+ ia64_ldfd_sa_hint (code, 1, 2, 0);
+ ia64_ldf8_sa_hint (code, 1, 2, 0);
+ ia64_ldfe_sa_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfd_c_clr_hint (code, 1, 2, 0);
+ ia64_ldf8_c_clr_hint (code, 1, 2, 0);
+ ia64_ldfe_c_clr_hint (code, 1, 2, 0);
+
+ ia64_ldfs_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfd_c_nc_hint (code, 1, 2, 0);
+ ia64_ldf8_c_nc_hint (code, 1, 2, 0);
+ ia64_ldfe_c_nc_hint (code, 1, 2, 0);
+
+ ia64_ldf_fill_hint (code, 1, 2, 0);
+
+ ia64_ldfs_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldf8_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfe_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldf_fill_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_s_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_s_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_a_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_a_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_sa_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_sa_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_clr_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfs_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfd_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldf8_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_ldfe_c_nc_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldf_fill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stfs_hint (code, 1, 2, 0);
+ ia64_stfd_hint (code, 1, 2, 0);
+ ia64_stf8_hint (code, 1, 2, 0);
+ ia64_stfe_hint (code, 1, 2, 0);
+
+ ia64_stf_spill_hint (code, 1, 2, 0);
+
+ ia64_stfs_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfd_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stf8_inc_imm_hint (code, 1, 2, 255, 0);
+ ia64_stfe_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_stf_spill_inc_imm_hint (code, 1, 2, 255, 0);
+
+ ia64_ldfps_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_s_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_s_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_a_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_a_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_sa_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_sa_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_clr_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_clr_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_ldfps_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfpd_c_nc_inc_hint (code, 1, 2, 3, 0);
+ ia64_ldfp8_c_nc_inc_hint (code, 1, 2, 3, 0);
+
+ ia64_lfetch_hint (code, 1, 0);
+ ia64_lfetch_excl_hint (code, 1, 0);
+ ia64_lfetch_fault_hint (code, 1, 0);
+ ia64_lfetch_fault_excl_hint (code, 1, 0);
+
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT1);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NT2);
+ ia64_lfetch_hint (code, 1, IA64_LFHINT_NTA);
+
+ ia64_lfetch_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_excl_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_inc_hint (code, 1, 2, 0);
+ ia64_lfetch_fault_excl_inc_hint (code, 1, 2, 0);
+
+ ia64_lfetch_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_excl_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_inc_imm_hint (code, 1, 255, 0);
+ ia64_lfetch_fault_excl_inc_imm_hint (code, 1, 255, 0);
+
+ ia64_cmpxchg1_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg1_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg2_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg4_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg8_rel_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_acq_hint (code, 1, 2, 3, 0);
+ ia64_cmpxchg16_rel_hint (code, 1, 2, 3, 0);
+ ia64_xchg1_hint (code, 1, 2, 3, 0);
+ ia64_xchg2_hint (code, 1, 2, 3, 0);
+ ia64_xchg4_hint (code, 1, 2, 3, 0);
+ ia64_xchg8_hint (code, 1, 2, 3, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, -16, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, -1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 1, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 4, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 8, 0);
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+
+ ia64_fetchadd4_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_acq_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd4_rel_hint (code, 1, 2, 16, 0);
+ ia64_fetchadd8_rel_hint (code, 1, 2, 16, 0);
+
+ ia64_setf_sig (code, 1, 2);
+ ia64_setf_exp (code, 1, 2);
+ ia64_setf_s (code, 1, 2);
+ ia64_setf_d (code, 1, 2);
+
+ ia64_getf_sig (code, 1, 2);
+ ia64_getf_exp (code, 1, 2);
+ ia64_getf_s (code, 1, 2);
+ ia64_getf_d (code, 1, 2);
+
+ ia64_chk_s_m (code, 1, 0);
+ ia64_chk_s_m (code, 1, 1);
+ ia64_chk_s_m (code, 1, -1);
+
+ ia64_chk_s_float_m (code, 1, 0);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_nc (code, 1, 1);
+ ia64_chk_a_nc (code, 1, -1);
+
+ ia64_chk_a_nc (code, 1, 0);
+ ia64_chk_a_clr (code, 1, 0);
+
+ ia64_chk_a_nc_float (code, 1, 0);
+ ia64_chk_a_clr_float (code, 1, 0);
+
+ ia64_invala (code);
+ ia64_fwb (code);
+ ia64_mf (code);
+ ia64_mf_a (code);
+ ia64_srlz_d (code);
+ ia64_stlz_i (code);
+ ia64_sync_i (code);
+
+ ia64_flushrs (code);
+ ia64_loadrs (code);
+
+ ia64_invala_e (code, 1);
+ ia64_invala_e_float (code, 1);
+
+ ia64_fc (code, 1);
+ ia64_fc_i (code, 1);
+
+ ia64_mov_to_ar_m (code, 1, 1);
+
+ ia64_mov_to_ar_imm_m (code, 1, 127);
+
+ ia64_mov_from_ar_m (code, 1, 1);
+
+ ia64_mov_to_cr (code, 1, 2);
+
+ ia64_mov_from_cr (code, 1, 2);
+
+ ia64_alloc (code, 1, 3, 4, 5, 0);
+ ia64_alloc (code, 1, 3, 4, 5, 8);
+
+ ia64_mov_to_psr_l (code, 1);
+ ia64_mov_to_psr_um (code, 1);
+
+ ia64_mov_from_psr (code, 1);
+ ia64_mov_from_psr_um (code, 1);
+
+ ia64_break_m (code, 0x1234);
+ ia64_nop_m (code, 0x1234);
+ ia64_hint_m (code, 0x1234);
+
+ ia64_br_cond_hint (code, 0, 0, 0, 0);
+ ia64_br_wexit_hint (code, 0, 0, 0, 0);
+ ia64_br_wtop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_cloop_hint (code, 0, 0, 0, 0);
+ ia64_br_cexit_hint (code, 0, 0, 0, 0);
+ ia64_br_ctop_hint (code, 0, 0, 0, 0);
+
+ ia64_br_call_hint (code, 1, 0, 0, 0, 0);
+
+ ia64_br_cond_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ia_reg_hint (code, 1, 0, 0, 0);
+ ia64_br_ret_reg_hint (code, 1, 0, 0, 0);
+
+ ia64_br_call_reg_hint (code, 1, 2, 0, 0, 0);
+
+ ia64_cover (code);
+ ia64_clrrrb (code);
+ ia64_clrrrb_pr (code);
+ ia64_rfi (code);
+ ia64_bsw_0 (code);
+ ia64_bsw_1 (code);
+ ia64_epc (code);
+
+ ia64_break_b (code, 0x1234);
+ ia64_nop_b (code, 0x1234);
+ ia64_hint_b (code, 0x1234);
+
+ ia64_break_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl (code, 1, 0x123456789ABCDEF0LL);
+
+ ia64_brl_cond_hint (code, 0, 0, 0, 0);
+ ia64_brl_cond_hint (code, -1, 0, 0, 0);
+
+ ia64_brl_call_hint (code, 1, 0, 0, 0, 0);
+ ia64_brl_call_hint (code, 1, -1, 0, 0, 0);
+
+ ia64_nop_x (code, 0x2123456789ABCDEFULL);
+ ia64_hint_x (code, 0x2123456789ABCDEFULL);
+
+ ia64_movl_pred (code, 1, 1, 0x123456789ABCDEF0LL);
+
+ /* FLOATING-POINT */
+ ia64_fma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fms_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpms_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_s_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fnma_d_sf_pred (code, 1, 1, 2, 3, 4, 2);
+ ia64_fpnma_sf_pred (code, 1, 1, 2, 3, 4, 2);
+
+ ia64_xma_l_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_h_pred (code, 1, 1, 2, 3, 4);
+ ia64_xma_hu_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fselect_pred (code, 1, 1, 2, 3, 4);
+
+ ia64_fcmp_eq_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_eq_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_lt_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_le_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fcmp_unord_unc_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_fclass_m_pred (code, 1, 1, 2, 3, 0x1ff);
+ ia64_fclass_m_unc_pred (code, 1, 1, 2, 3, 0x1ff);
+
+ ia64_frcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+ ia64_fprcpa_sf_pred (code, 1, 1, 2, 3, 4, 0);
+
+ ia64_frsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+ ia64_fprsqrta_sf_pred (code, 1, 1, 2, 4, 0);
+
+ ia64_fmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_famax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpmin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpman_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamin_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpamax_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_eq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_lt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_le_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_unord_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_neq_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nlt_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_nle_sf_pred (code, 1, 2, 3, 4, 0);
+ ia64_fpcmp_ord_sf_pred (code, 1, 2, 3, 4, 0);
+
+ ia64_fmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fmerge_se_pred (code, 1, 2, 3, 4);
+ ia64_fmix_lr_pred (code, 1, 2, 3, 4);
+ ia64_fmix_r_pred (code, 1, 2, 3, 4);
+ ia64_fmix_l_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_r_pred (code, 1, 2, 3, 4);
+ ia64_fsxt_l_pred (code, 1, 2, 3, 4);
+ ia64_fpack_pred (code, 1, 2, 3, 4);
+ ia64_fswap_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nl_pred (code, 1, 2, 3, 4);
+ ia64_fswap_nr_pred (code, 1, 2, 3, 4);
+ ia64_fand_pred (code, 1, 2, 3, 4);
+ ia64_fandcm_pred (code, 1, 2, 3, 4);
+ ia64_for_pred (code, 1, 2, 3, 4);
+ ia64_fxor_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_s_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_ns_pred (code, 1, 2, 3, 4);
+ ia64_fpmerge_se_pred (code, 1, 2, 3, 4);
+
+ ia64_fcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fx_trunc_sf_pred ((code), 1, 2, 3, 0);
+ ia64_fpcvt_fxu_trunc_sf_pred ((code), 1, 2, 3, 0);
+
+ ia64_fcvt_xf_pred ((code), 1, 2, 3);
+
+ ia64_fsetc_sf_pred ((code), 1, 0x33, 0x33, 3);
+
+ ia64_fclrf_sf_pred ((code), 1, 3);
+
+ ia64_fchkf_sf_pred ((code), 1, -1, 3);
+
+ ia64_break_f_pred ((code), 1, 0x1234);
+
+ ia64_movl (code, 31, -123456);
+
+ ia64_codegen_close (code);
+
+#if 0
+ /* disassembly */
+ {
+ guint8 *buf = code.buf;
+ int template;
+ guint64 dw1, dw2;
+ guint64 ins1, ins2, ins3;
+
+ ia64_break_i (code, 0x1234);
+
+ ia64_codegen_close (code);
+
+ dw1 = ((guint64*)buf) [0];
+ dw2 = ((guint64*)buf) [1];
+
+ template = ia64_bundle_template (buf);
+ ins1 = ia64_bundle_ins1 (buf);
+ ins2 = ia64_bundle_ins2 (buf);
+ ins3 = ia64_bundle_ins3 (buf);
+
+ code.buf = buf;
+ ia64_emit_bundle_template (&code, template, ins1, ins2, ins3);
+
+ g_assert (dw1 == ((guint64*)buf) [0]);
+ g_assert (dw2 == ((guint64*)buf) [1]);
+ }
+#endif
+
+ mono_disassemble_code (buf, 40960, "code");
+
+ return 0;
+}
diff --git a/src/arch/ia64/ia64-codegen.h b/src/arch/ia64/ia64-codegen.h
new file mode 100644
index 0000000..1793580
--- /dev/null
+++ b/src/arch/ia64/ia64-codegen.h
@@ -0,0 +1,3183 @@
+/*
+ * ia64-codegen.h: Macros for generating ia64 code
+ *
+ * Authors:
+ * Zoltan Varga (vargaz@gmail.com)
+ *
+ * (C) 2005 Novell, Inc.
+ */
+
+#ifndef _IA64_CODEGEN_H_
+#define _IA64_CODEGEN_H_
+
+#include <glib.h>
+#include <string.h>
+
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+
+/* The execution unit an instruction requires: ALU (A), integer (I),
+ * memory (M), floating-point (F) or branch (B); LX is the two-slot
+ * long-immediate form (movl/brl).  Used to pick a bundle template. */
+typedef enum {
+ IA64_INS_TYPE_A,
+ IA64_INS_TYPE_I,
+ IA64_INS_TYPE_M,
+ IA64_INS_TYPE_F,
+ IA64_INS_TYPE_B,
+ IA64_INS_TYPE_LX
+} Ia64InsType;
+
+/* 5-bit bundle template values.  The letters name the unit type of each of
+ * the three slots (M/I/F/B, LX spanning two slots); a trailing 'S' marks a
+ * stop bit at the end of the bundle, an embedded 'S' (e.g. MISI) a
+ * mid-bundle stop.  The UNUSn values are reserved encodings. */
+typedef enum {
+ IA64_TEMPLATE_MII = 0x00,
+ IA64_TEMPLATE_MIIS = 0x01,
+ IA64_TEMPLATE_MISI = 0x02,
+ IA64_TEMPLATE_MISIS = 0x03,
+ IA64_TEMPLATE_MLX = 0x04,
+ IA64_TEMPLATE_MLXS = 0x05,
+ IA64_TEMPLATE_UNUS1 = 0x06,
+ IA64_TEMPLATE_UNUS2 = 0x07,
+ IA64_TEMPLATE_MMI = 0x08,
+ IA64_TEMPLATE_MMIS = 0x09,
+ IA64_TEMPLATE_MSMI = 0x0A,
+ IA64_TEMPLATE_MSMIS = 0x0B,
+ IA64_TEMPLATE_MFI = 0x0C,
+ IA64_TEMPLATE_MFIS = 0x0D,
+ IA64_TEMPLATE_MMF = 0x0E,
+ IA64_TEMPLATE_MMFS = 0x0F,
+ IA64_TEMPLATE_MIB = 0x10,
+ IA64_TEMPLATE_MIBS = 0x11,
+ IA64_TEMPLATE_MBB = 0x12,
+ IA64_TEMPLATE_MBBS = 0x13,
+ IA64_TEMPLATE_UNUS3 = 0x14,
+ IA64_TEMPLATE_UNUS4 = 0x15,
+ IA64_TEMPLATE_BBB = 0x16,
+ IA64_TEMPLATE_BBBS = 0x17,
+ IA64_TEMPLATE_MMB = 0x18,
+ IA64_TEMPLATE_MMBS = 0x19,
+ IA64_TEMPLATE_UNUS5 = 0x1A,
+ IA64_TEMPLATE_UNUS6 = 0x1B,
+ IA64_TEMPLATE_MFB = 0x1C,
+ IA64_TEMPLATE_MFBS = 0x1D,
+ IA64_TEMPLATE_UNUS7 = 0x1E,
+ IA64_TEMPLATE_UNUS8 = 0x1F,
+} Ia64BundleTemplate;
+
+/* The static subset of the general registers (r0-r31), plus the standard
+ * software-convention aliases: gp = r1 (global pointer), sp = r12 (stack
+ * pointer), tp = r13 (thread pointer). */
+typedef enum {
+ IA64_R0 = 0,
+ IA64_R1 = 1,
+ IA64_R2 = 2,
+ IA64_R3 = 3,
+ IA64_R4 = 4,
+ IA64_R5 = 5,
+ IA64_R6 = 6,
+ IA64_R7 = 7,
+ IA64_R8 = 8,
+ IA64_R9 = 9,
+ IA64_R10 = 10,
+ IA64_R11 = 11,
+ IA64_R12 = 12,
+ IA64_R13 = 13,
+ IA64_R14 = 14,
+ IA64_R15 = 15,
+ IA64_R16 = 16,
+ IA64_R17 = 17,
+ IA64_R18 = 18,
+ IA64_R19 = 19,
+ IA64_R20 = 20,
+ IA64_R21 = 21,
+ IA64_R22 = 22,
+ IA64_R23 = 23,
+ IA64_R24 = 24,
+ IA64_R25 = 25,
+ IA64_R26 = 26,
+ IA64_R27 = 27,
+ IA64_R28 = 28,
+ IA64_R29 = 29,
+ IA64_R30 = 30,
+ IA64_R31 = 31,
+
+ /* Aliases */
+ IA64_GP = IA64_R1,
+ IA64_SP = IA64_R12,
+ IA64_TP = IA64_R13
+} Ia64GeneralRegister;
+
+/* Branch registers b0-b7; b0 is aliased as rp (return pointer), which by
+ * the ia64 calling convention holds the return address. */
+typedef enum {
+ IA64_B0 = 0,
+ IA64_B1 = 1,
+ IA64_B2 = 2,
+ IA64_B3 = 3,
+ IA64_B4 = 4,
+ IA64_B5 = 5,
+ IA64_B6 = 6,
+ IA64_B7 = 7,
+
+ /* Aliases */
+ IA64_RP = IA64_B0
+} Ia64BranchRegister;
+
+/* Application registers used by this emitter: ar32 = CCV (the compare value
+ * for cmpxchg) and ar64 = PFS (previous function state, saved around calls). */
+typedef enum {
+ IA64_CCV = 32,
+ IA64_PFS = 64
+} Ia64ApplicationRegister;
+
+/* disassembly */
+/* A bundle is 128 bits: a 5-bit template (bits 0-4) and three 41-bit
+ * instruction slots (bits 5-45, 46-86 and 87-127).  Slot 1 straddles the
+ * two little-endian 64-bit words, hence the split extraction in ins2. */
+#define ia64_bundle_template(code) ((*(guint64*)(gpointer)code) & 0x1f)
+#define ia64_bundle_ins1(code) (((*(guint64*)(gpointer)code) >> 5) & 0x1ffffffffff)
+#define ia64_bundle_ins2(code) (((*(guint64*)(gpointer)code) >> 46) | ((((guint64*)(gpointer)code)[1] & 0x7fffff) << 18))
+#define ia64_bundle_ins3(code) ((((guint64*)(gpointer)code)[1]) >> 23)
+
+/* Field extractors for a single 41-bit instruction word: major opcode,
+ * qualifying predicate, register operands and the various opcode
+ * extension fields. */
+#define ia64_ins_opcode(ins) (((guint64)(ins)) >> 37)
+#define ia64_ins_qp(ins) (((guint64)(ins)) & 0x3f)
+#define ia64_ins_r1(ins) ((((guint64)(ins)) >> 6) & 0x7f)
+#define ia64_ins_r2(ins) ((((guint64)(ins)) >> 13) & 0x7f)
+#define ia64_ins_r3(ins) ((((guint64)(ins)) >> 20) & 0x7f)
+
+#define ia64_ins_b1(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_b2(ins) ((((guint64)(ins)) >> 13) & 0x7)
+#define ia64_ins_btype(ins) ((((guint64)(ins)) >> 6) & 0x7)
+#define ia64_ins_x(ins) ((((guint64)(ins)) >> 22) & 0x1)
+#define ia64_ins_x2a(ins) ((((guint64)(ins)) >> 34) & 0x3)
+#define ia64_ins_x2b(ins) ((((guint64)(ins)) >> 27) & 0x3)
+#define ia64_ins_x3(ins) ((((guint64)(ins)) >> 33) & 0x7)
+#define ia64_ins_x4(ins) ((((guint64)(ins)) >> 29) & 0xf)
+#define ia64_ins_x6(ins) ((((guint64)(ins)) >> 27) & 0x3f)
+#define ia64_ins_y(ins) ((((guint64)(ins)) >> 26) & 0x1)
+#define ia64_ins_vc(ins) ((((guint64)(ins)) >> 20) & 0x1)
+#define ia64_ins_ve(ins) ((((guint64)(ins)) >> 33) & 0x1)
+
+/* Pre-encoded nop instruction words, one per slot type, used by
+ * ia64_emit_bundle below to pad unused bundle slots. */
+#define IA64_NOP_I ((0x01 << 27))
+#define IA64_NOP_M ((0x01 << 27))
+#define IA64_NOP_B (((long)0x02 << 37))
+#define IA64_NOP_F ((0x01 << 27))
+#define IA64_NOP_X ((0x01 << 27))
+
+/*
+ * READ_PR_BRANCH and WRITE_PR_FLOAT are used to be able to place comparisons
+ * + branches in the same instruction group.
+ */
+/* Tags recorded, paired with a register number, in
+ * Ia64CodegenState.dep_info by the read/write helpers further below;
+ * IA64_END_OF_INS terminates the entries of one instruction. */
+typedef enum {
+ IA64_READ_GR,
+ IA64_WRITE_GR,
+ IA64_READ_PR,
+ IA64_WRITE_PR,
+ IA64_READ_PR_BRANCH,
+ IA64_WRITE_PR_FLOAT,
+ IA64_READ_BR,
+ IA64_WRITE_BR,
+ IA64_READ_BR_BRANCH,
+ IA64_READ_FR,
+ IA64_WRITE_FR,
+ IA64_READ_AR,
+ IA64_WRITE_AR,
+ IA64_NO_STOP,
+ IA64_END_OF_INS,
+ IA64_NONE
+} Ia64Dependency;
+
+/*
+ * IA64 code cannot be emitted in the same way as code on other processors,
+ * since 3 instructions are combined into a bundle. This structure keeps track
+ * of already emitted instructions.
+ *
+ */
+
+#define IA64_INS_BUFFER_SIZE 4
+#define MAX_UNW_OPS 8
+
+typedef struct {
+ guint8 *buf; /* next (bundle-aligned) output position */
+ guint one_ins_per_bundle : 1; /* when set, flush after every instruction */
+ int nins, template, dep_info_pos, unw_op_pos, unw_op_count;
+ guint64 instructions [IA64_INS_BUFFER_SIZE]; /* queued 41-bit ins words */
+ int itypes [IA64_INS_BUFFER_SIZE]; /* Ia64InsType of each queued ins */
+ guint8 *region_start;
+ guint8 dep_info [128]; /* (Ia64Dependency, register) byte pairs */
+ unw_dyn_op_t unw_ops [MAX_UNW_OPS]; /* pending libunwind dynamic ops */
+ /* The index of the instruction to which the given unw op belongs */
+ guint8 unw_ops_pos [MAX_UNW_OPS];
+} Ia64CodegenState;
+
+/* Flush the queued instructions into bundles at code->buf; a static
+ * definition is provided below when IA64_SIMPLE_EMIT_BUNDLE is set,
+ * otherwise the symbol is external. */
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+G_GNUC_UNUSED static void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#else
+void ia64_emit_bundle (Ia64CodegenState *code, gboolean flush);
+#endif
+
+/* Prepare 'code' to emit into codegen_buf. */
+#define ia64_codegen_init(code, codegen_buf) do { \
+ code.buf = codegen_buf; \
+ code.region_start = code.buf; \
+ code.nins = 0; \
+ code.one_ins_per_bundle = 0; \
+ code.dep_info_pos = 0; \
+ code.unw_op_count = 0; \
+ code.unw_op_pos = 0; \
+} while (0)
+
+/* Flush any still-queued instructions at the end of code generation. */
+#define ia64_codegen_close(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
+
+/* Force the next instruction to start a fresh bundle. */
+#define ia64_begin_bundle(code) do { \
+ ia64_emit_bundle (&code, TRUE); \
+} while (0)
+
+/* Toggle one-instruction-per-bundle mode; flushes pending work first. */
+#define ia64_codegen_set_one_ins_per_bundle(code, is_one) do { \
+ ia64_begin_bundle (code); \
+ code.one_ins_per_bundle = (is_one); \
+} while (0)
+
+/* Start a fresh bundle with an explicitly chosen template. */
+#define ia64_begin_bundle_template(code, bundle_template) do { \
+ ia64_emit_bundle (&code, TRUE); \
+ code.template = (bundle_template); \
+} while (0)
+
+/*
+ * libunwind dynamic unwind-op helpers.  Each records one op, tagged in
+ * unw_ops_pos with the index (code.nins) of the instruction it belongs to.
+ * unw_ops and unw_ops_pos hold exactly MAX_UNW_OPS entries, so the capacity
+ * assert must be strict: the previous '<=' let unw_op_count reach
+ * MAX_UNW_OPS and then wrote one element past the end of both arrays.
+ */
+#define ia64_unw_save_reg(code, reg, dreg) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_save_reg (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, -1, reg, dreg); \
+} while (0)
+
+#define ia64_unw_add(code, reg, val) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_add (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, reg, val); \
+} while (0)
+
+#define ia64_unw_pop_frames(code, nframes) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_pop_frames (&(code.unw_ops [code.unw_op_count ++]), _U_QP_TRUE, code.nins, (nframes)); \
+} while (0)
+
+#define ia64_unw_label_state(code, id) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_label_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+
+#define ia64_unw_copy_state(code, id) do { \
+ g_assert (code.unw_op_count < MAX_UNW_OPS); \
+ code.unw_ops_pos [code.unw_op_count] = code.nins; \
+ _U_dyn_op_copy_state (&(code.unw_ops [code.unw_op_count ++]), (id)); \
+} while (0)
+
+#if 0
+/* To ease debugging, emit instructions immediately */
+/* NOTE(review): this disabled variant is missing its leading 'if' and would
+ * not compile if re-enabled as-is. */
+#define EMIT_BUNDLE(itype, code) ((itype != IA64_INS_TYPE_LX) || (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#else
+/* Flush early when a two-slot LX instruction arrives and the instruction
+ * buffer could otherwise split it across bundles. */
+#define EMIT_BUNDLE(itype, code) if ((itype == IA64_INS_TYPE_LX) && (code.nins == 2)) ia64_emit_bundle (&code, FALSE);
+#endif
+
+/* Queue one encoded instruction word together with an IA64_END_OF_INS
+ * dependency marker; flush to memory once the buffer is full. */
+#define ia64_emit_ins(code, itype, ins) do { \
+ code.instructions [code.nins] = ins; \
+ code.itypes [code.nins] = itype; \
+ code.nins ++; \
+ code.dep_info [code.dep_info_pos ++] = IA64_END_OF_INS; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+ EMIT_BUNDLE (itype, code); \
+ if (code.nins == IA64_INS_BUFFER_SIZE) \
+ ia64_emit_bundle (&code, FALSE); \
+} while (0)
+
+/* Record an IA64_NO_STOP tag in the dependency stream (consumed by the
+ * non-simple ia64_emit_bundle, which is not visible in this header). */
+#define ia64_no_stop(code) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_NO_STOP; \
+ code.dep_info [code.dep_info_pos ++] = 0; \
+} while (0)
+
+/* The bundle packing below assumes little-endian 64-bit stores. */
+#if G_BYTE_ORDER != G_LITTLE_ENDIAN
+#error "FIXME"
+#endif
+
+/* Pack a 5-bit template and three 41-bit instruction slots into the 128-bit
+ * bundle at (code)->buf and advance the pointer by 16 bytes.  Slot 1
+ * straddles the two 64-bit words: its low 18 bits go into dw1, the
+ * remaining 23 bits into dw2. */
+#define ia64_emit_bundle_template(code, template, i1, i2, i3) do { \
+ guint64 *buf64 = (guint64*)(gpointer)(code)->buf; \
+ guint64 dw1, dw2; \
+ dw1 = (((guint64)(template)) & 0x1f) | ((guint64)(i1) << 5) | ((((guint64)(i2)) & 0x3ffff) << 46); \
+ dw2 = (((guint64)(i2)) >> 18) | (((guint64)(i3)) << 23); \
+ buf64[0] = dw1; \
+ buf64[1] = dw2; \
+ (code)->buf += 16; \
+} while (0)
+
+#ifdef IA64_SIMPLE_EMIT_BUNDLE
+
+/*
+ * Simple bundling: emit each queued instruction in its own bundle, padding
+ * the unused slots with unit-appropriate nops.  Only 'S' (stop-bit)
+ * templates are used, so every bundle ends an instruction group.  An LX
+ * instruction consumes two queue entries.  The 'flush' argument is unused
+ * in this variant.
+ */
+G_GNUC_UNUSED static void
+ia64_emit_bundle (Ia64CodegenState *code, gboolean flush)
+{
+ int i;
+
+ for (i = 0; i < code->nins; ++i) {
+ switch (code->itypes [i]) {
+ case IA64_INS_TYPE_A:
+ /* A-type instructions can occupy an M slot */
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_I:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_M:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIIS, code->instructions [i], IA64_NOP_I, IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_B:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MIBS, IA64_NOP_M, IA64_NOP_I, code->instructions [i]);
+ break;
+ case IA64_INS_TYPE_F:
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MFIS, IA64_NOP_M, code->instructions [i], IA64_NOP_I);
+ break;
+ case IA64_INS_TYPE_LX:
+ /* LX occupies slots 1 and 2 of an MLX bundle: two queue entries */
+ ia64_emit_bundle_template (code, IA64_TEMPLATE_MLXS, IA64_NOP_M, code->instructions [i], code->instructions [i + 1]);
+ i ++;
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+
+ code->nins = 0;
+ code->dep_info_pos = 0;
+}
+
+#endif /* IA64_SIMPLE_EMIT_BUNDLE */
+
+/* Side-effect-free range predicates for signed immediates, usable by
+ * instruction-selection code. */
+#define ia64_is_imm8(imm) (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define ia64_is_imm14(imm) (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define ia64_is_imm21(imm) (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+
+#define ia64_is_adds_imm(imm) ia64_is_imm14((imm))
+
+/* Toggle encoding-time operand checking. */
+#if 1
+
+#define check_assert(cond) g_assert((cond))
+
+#else
+
+#define check_assert(cond)
+
+#endif
+
+#define check_greg(gr) check_assert ((guint64)(gr) < 128)
+
+#define check_freg(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_fr(fr) check_assert ((guint64)(fr) < 128)
+
+#define check_preg(pr) check_assert ((guint64)(pr) < 64)
+
+#define check_breg(pr) check_assert ((guint64)(pr) < 8)
+
+/* shladd count: encoded as count-1 in a 2-bit field (see ia64_a2) */
+#define check_count2(count) check_assert (((count) >= 1) && ((count) <= 4))
+
+#define check_count5(count) check_assert (((count) >= 0) && ((count) < 32))
+
+#define check_count6(count) check_assert (((count) >= 0) && ((count) < 64))
+
+/* Signed immediates of the given bit width. */
+#define check_imm1(imm) check_assert (((gint64)(imm) >= -1) && ((gint64)(imm) <= 0))
+#define check_imm3(imm) check_assert (((gint64)(imm) >= -4) && ((gint64)(imm) <= 3))
+#define check_imm8(imm) check_assert (((gint64)(imm) >= -128) && ((gint64)(imm) <= 127))
+#define check_imm9(imm) check_assert (((gint64)(imm) >= -256) && ((gint64)(imm) <= 255))
+#define check_imm14(imm) check_assert (((gint64)(imm) >= -8192) && ((gint64)(imm) <= 8191))
+#define check_imm21(imm) check_assert (((gint64)(imm) >= -0x100000) && ((gint64)(imm) <= (0x100000 - 1)))
+#define check_imm22(imm) check_assert (((gint64)(imm) >= -0x200000) && ((gint64)(imm) <= (0x200000 - 1)))
+/* A signed 62-bit immediate spans [-2^61, 2^61 - 1].  The previous bounds
+ * (+/-0x2fffffffffffffffLL) did not describe any power-of-two range and
+ * accepted out-of-range values. */
+#define check_imm62(imm) check_assert (((gint64)(imm) >= -0x2000000000000000LL) && ((gint64)(imm) <= (0x2000000000000000LL - 1)))
+
+/* 4-bit length operand: valid lengths 1..16 */
+#define check_len4(len) check_assert (((gint64)(len) >= 1) && ((gint64)(len) <= 16))
+
+/* Branch whether/prefetch/cache-deallocation and status-field hints. */
+#define check_bwh(bwh) check_assert ((bwh) >= 0 && (bwh) <= IA64_BWH_DPNT)
+
+#define check_ph(ph) check_assert ((ph) >= 0 && (ph) <= IA64_PH_MANY)
+
+#define check_dh(dh) check_assert ((dh) >= 0 && (dh) <= IA64_DH_CLR)
+
+#define check_sf(sf) check_assert ((sf) >= 0 && (sf) <= 3)
+
+/* 1 when the signed immediate is negative; used for the 's' sign fields. */
+#define sign_bit(imm) ((gint64)(imm) < 0 ? 1 : 0)
+
+/* Dependency info */
+/* Each helper appends a (tag, register) byte pair to code.dep_info for the
+ * instruction currently being assembled.  Reads/writes of predicate
+ * register 0 are not recorded.  The check_* calls abort on out-of-range
+ * register numbers. */
+#define read_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define write_gr(code, gr) do { \
+ check_greg ((gr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_GR; \
+ code.dep_info [code.dep_info_pos ++] = gr; \
+} while (0)
+
+#define read_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+#define write_pr(code,pr) do { \
+ if ((pr) != 0) { \
+ check_preg ((pr)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR; \
+ code.dep_info [code.dep_info_pos ++] = (pr); \
+ } \
+} while (0)
+
+/* Predicate read by a branch: tagged separately so a compare and the branch
+ * consuming it can share an instruction group (see Ia64Dependency). */
+#define read_pr_branch(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_PR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_pr_fp(code,reg) do { \
+ check_preg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_PR_FLOAT; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_br(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_BR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_br_branch(code,reg) do { \
+ check_breg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_BR_BRANCH; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define read_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_fr(code,reg) do { \
+ check_freg ((reg)); \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_FR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+/* Application-register accesses are recorded without a range check. */
+#define read_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_READ_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+#define write_ar(code,reg) do { \
+ code.dep_info [code.dep_info_pos ++] = IA64_WRITE_AR; \
+ code.dep_info [code.dep_info_pos ++] = (reg); \
+} while (0)
+
+/* ia64_emit_ins_N: build a 41-bit instruction word by OR-ing together N
+ * (field-value, bit-offset) pairs and queue it via ia64_emit_ins. */
+#define ia64_emit_ins_1(code,itype,f1,o1) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1))))
+
+#define ia64_emit_ins_3(code,itype,f1,o1,f2,o2,f3,o3) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3))))
+
+#define ia64_emit_ins_5(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5))))
+
+#define ia64_emit_ins_6(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6))))
+
+#define ia64_emit_ins_7(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7))))
+
+#define ia64_emit_ins_8(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8))))
+
+#define ia64_emit_ins_9(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9))))
+
+#define ia64_emit_ins_10(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10))))
+
+#define ia64_emit_ins_11(code,itype,f1,o1,f2,o2,f3,o3,f4,o4,f5,o5,f6,o6,f7,o7,f8,o8,f9,o9,f10,o10,f11,o11) ia64_emit_ins ((code), (itype), (((guint64)(f1) << (o1)) | ((guint64)(f2) << (o2)) | ((guint64)(f3) << (o3)) | ((guint64)(f4) << (o4)) | ((guint64)(f5) << (o5)) | ((guint64)(f6) << (o6)) | ((guint64)(f7) << (o7)) | ((guint64)(f8) << (o8)) | ((guint64)(f9) << (o9)) | ((guint64)(f10) << (o10)) | ((guint64)(f11) << (o11))))
+
+/*
+ * A-Unit instructions
+ */
+
+/* Format A1, register-register ALU: major opcode 8 with the x2b/x4/ve/x2a
+ * extension fields selecting the operation. */
+#define ia64_a1(code, qp, r1, r2, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_add_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 0)
+#define ia64_add1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 0, 1)
+#define ia64_sub_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 1)
+#define ia64_sub1_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 1, 0)
+#define ia64_addp4_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 2, 0)
+#define ia64_and_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 0)
+#define ia64_andcm_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 1)
+#define ia64_or_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 2)
+#define ia64_xor_pred(code, qp, r1, r2, r3) ia64_a1 ((code), (qp), r1, r2, r3, 0, 0, 3, 3)
+
+/* Format A2, shift left and add: the 2-bit count field stores count-1,
+ * hence the check_count2 range of 1..4. */
+#define ia64_a2(code, qp, r1, r2, r3, x2a, ve, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 (ct2d); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d - 1), 27, (x4), 29, (ve), 33, (x2a), 34, (8), 37); } while (0)
+
+#define ia64_shladd_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 4, (count))
+#define ia64_shladdp4_pred(code, qp, r1, r2, r3,count) ia64_a2 ((code), (qp), r1, r2, r3, 0, 0, 6, (count))
+
+/* Format A3, ALU with 8-bit immediate: 7 low bits at offset 13 plus a sign
+ * bit at 36. */
+#define ia64_a3(code, qp, r1, imm8, r3, x2a, ve, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm8 ((imm8)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (imm8) & 0x7f, 13, (r3), 20, (x2b), 27, (x4), 29, (ve), 33, (x2a), 34, sign_bit((imm8)), 36, (8), 37); } while (0)
+
+#define ia64_sub_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 9, 1)
+#define ia64_and_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 0)
+#define ia64_andcm_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 1)
+#define ia64_or_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 2)
+#define ia64_xor_imm_pred(code, qp,r1,imm8,r3) ia64_a3 ((code), (qp), (r1), (imm8), (r3), 0, 0, 0xb, 3)
+
+/* Format A4, add 14-bit immediate: split 7 + 6 bits plus a sign bit. */
+#define ia64_a4(code, qp, r1, imm14, r3, x2a, ve) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_imm14 ((imm14)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((imm14) & 0x7f), 13, (r3), 20, (((guint64)(imm14) >> 7) & 0x3f), 27, (ve), 33, (x2a), 34, sign_bit ((imm14)), 36, (8), 37); } while (0)
+
+#define ia64_adds_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 2, 0)
+#define ia64_addp4_imm_pred(code, qp,r1,imm14,r3) ia64_a4 ((code), (qp), (r1), (imm14), (r3), 3, 0)
+
+/* Format A5, add 22-bit immediate: major opcode 9; r3 is restricted to
+ * r0-r3 (2-bit field), checked via check_assert. */
+#define ia64_a5(code, qp, r1, imm, r3) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_assert ((r3) < 4); check_imm22 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (r3), 20, (((guint64)(imm) >> 12) & 0x1f), 22, (((guint64)(imm) >> 7) & 0x1ff), 27, sign_bit ((imm)), 36, (9), 37); } while (0)
+
+#define ia64_addl_imm_pred(code, qp, r1, imm22, r3) ia64_a5 ((code), (qp), (r1), (imm22), (r3))
+
+/* Format A6, register compare writing two predicate registers: the opcode
+ * and the x2/tb/ta/c extension bits select the relation and how the result
+ * combines with the target predicates (unconditional/and/or/or.andcm). */
+#define ia64_a6(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 0)
+#define ia64_cmp_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 0)
+#define ia64_cmp_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 0)
+#define ia64_cmp_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 0, 1)
+#define ia64_cmp_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 0, 1)
+#define ia64_cmp_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 0, 1)
+#define ia64_cmp_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 0)
+#define ia64_cmp_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 0)
+#define ia64_cmp_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 0, 1, 1)
+#define ia64_cmp_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 0, 1, 1)
+
+/* 32-bit (cmp4) variants: x2 = 1 */
+#define ia64_cmp4_lt_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 0)
+#define ia64_cmp4_ltu_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 0)
+#define ia64_cmp4_eq_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 0)
+#define ia64_cmp4_lt_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 0, 1)
+#define ia64_cmp4_ltu_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 0, 1)
+#define ia64_cmp4_eq_unc_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 0, 1)
+#define ia64_cmp4_eq_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 0)
+#define ia64_cmp4_eq_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 0)
+#define ia64_cmp4_ne_and_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 0, 1, 1)
+#define ia64_cmp4_ne_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a6 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 0, 1, 1)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_cmp4_ne_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_le_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gt_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_ge_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), (qp), (p2), (p1), (r2), (r3))
+#define ia64_cmp4_leu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r3), (r2))
+#define ia64_cmp4_gtu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p1), (p2), (r3), (r2))
+#define ia64_cmp4_geu_pred(code, qp, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), (qp), (p2), (p1), (r2), (r3))
+
+#define ia64_a7(code, qp, p1, p2, r2, r3, opcode, x2, tb, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert ((r2) == 0); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, (r2), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 0)
+#define ia64_cmp_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 0)
+#define ia64_cmp_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 0, 1)
+#define ia64_cmp_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 0, 1)
+#define ia64_cmp_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 0, 1)
+#define ia64_cmp_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 0)
+#define ia64_cmp_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 0)
+#define ia64_cmp_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 0, 1, 1, 1)
+#define ia64_cmp_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 0, 1, 1, 1)
+
+#define ia64_cmp4_gt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 0)
+#define ia64_cmp4_gt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 0)
+#define ia64_cmp4_le_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 0, 1)
+#define ia64_cmp4_le_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 0, 1)
+#define ia64_cmp4_ge_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 0)
+#define ia64_cmp4_ge_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 0)
+#define ia64_cmp4_lt_and_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xc, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xd, 1, 1, 1, 1)
+#define ia64_cmp4_lt_or_andcm_pred(code, qp, p1, p2, r2, r3) ia64_a7 ((code), (qp), (p1), (p2), (r2), (r3), 0xe, 1, 1, 1, 1)
+
+#define ia64_a8(code, qp, p1, p2, imm, r3, opcode, x2, ta, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); read_gr ((code), (r3)); check_imm8 ((imm)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (p1), 6, (c), 12, ((guint64)(imm) & 0x7f), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_cmp_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 0)
+#define ia64_cmp_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 0)
+#define ia64_cmp_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 0)
+#define ia64_cmp_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 0, 1)
+#define ia64_cmp_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 0, 1)
+#define ia64_cmp_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 0, 1)
+#define ia64_cmp_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 0)
+#define ia64_cmp_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 0)
+#define ia64_cmp_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 0)
+#define ia64_cmp_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 2, 1, 1)
+#define ia64_cmp_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 2, 1, 1)
+#define ia64_cmp_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 2, 1, 1)
+
+#define ia64_cmp4_lt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 0)
+#define ia64_cmp4_ltu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 0)
+#define ia64_cmp4_eq_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 0)
+#define ia64_cmp4_lt_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 0, 1)
+#define ia64_cmp4_ltu_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 0, 1)
+#define ia64_cmp4_eq_unc_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 0, 1)
+#define ia64_cmp4_eq_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 0)
+#define ia64_cmp4_eq_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 0)
+#define ia64_cmp4_eq_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 0)
+#define ia64_cmp4_ne_and_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xc, 3, 1, 1)
+#define ia64_cmp4_ne_or_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xd, 3, 1, 1)
+#define ia64_cmp4_ne_or_andcm_imm_pred(code, qp, p1, p2, imm8, r3) ia64_a8 ((code), (qp), (p1), (p2), (imm8), (r3), 0xe, 3, 1, 1)
+
+/* Pseudo ops */
+#define ia64_cmp_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_cmp4_ne_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_le_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gt_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_ge_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+#define ia64_cmp4_leu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p1), (p2), (imm8) - 1, (r3))
+#define ia64_cmp4_gtu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8) - 1, (r3))
+#define ia64_cmp4_geu_imm_pred(code, qp, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), (qp), (p2), (p1), (imm8), (r3))
+
+#define ia64_a9(code, qp, r1, r2, r3, x2a, za, zb, x4, x2b) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_padd1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0)
+#define ia64_padd2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 0)
+#define ia64_padd4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0)
+#define ia64_padd1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 1)
+#define ia64_padd2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 1)
+#define ia64_padd1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2)
+#define ia64_padd2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 2)
+#define ia64_padd1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 3)
+#define ia64_padd2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 0, 3)
+
+#define ia64_psub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 0)
+#define ia64_psub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 0)
+#define ia64_psub4_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 1, 0)
+#define ia64_psub1_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 1)
+#define ia64_psub2_sss_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 1)
+#define ia64_psub1_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 2)
+#define ia64_psub2_uuu_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 2)
+#define ia64_psub1_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 1, 3)
+#define ia64_psub2_uus_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 1, 3)
+
+#define ia64_pavg1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2)
+#define ia64_pavg2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 2)
+#define ia64_pavg1_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 3)
+#define ia64_pavg2_raz_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 2, 3)
+#define ia64_pavgsub1_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 3, 2)
+#define ia64_pavgsub2_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 3, 2)
+#define ia64_pcmp1_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 0)
+#define ia64_pcmp2_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 0)
+#define ia64_pcmp4_eq_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 0)
+#define ia64_pcmp1_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 9, 1)
+#define ia64_pcmp2_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 9, 1)
+#define ia64_pcmp4_gt_pred(code, qp,r1,r2,r3) ia64_a9 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 9, 1)
+
+#define ia64_a10(code, qp, r1, r2, r3, x2a, za, zb, x4, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count2 ((ct2d)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_A, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (ct2d) - 1, 27, (x4), 29, (zb), 33, (x2a), 34, (za), 36, (8), 37); } while (0)
+
+#define ia64_pshladd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 4, count);
+#define ia64_pshradd2_pred(code, qp, r1, r2, r3, count) ia64_a10 ((code), (qp), (r1), (r2), (r3), 1, 0, 1, 6, count);
+
+#define encode_pmpyshr_count(count) (((count) == 0) ? 0 : (((count) == 7) ? 1 : (((count) == 15) ? 2 : 3)))
+
+/*
+ * I-Unit Instructions
+ */
+
+#define ia64_i1(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, ct2d) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_assert (((ct2d) == 0) | ((ct2d) == 7) | ((ct2d) == 15) | ((ct2d) == 16)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, encode_pmpyshr_count((ct2d)), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpyshr2_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 3, (count));
+
+#define ia64_pmpyshr2_u_pred(code, qp, r1, r2, r3, count) ia64_i1 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 1, (count));
+
+#define ia64_i2(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pmpy2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 1, 3)
+#define ia64_pmpy2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 3)
+#define ia64_mix1_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 2)
+#define ia64_mix2_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2)
+#define ia64_mix4_r_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2)
+#define ia64_mix1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 2)
+#define ia64_mix2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 2)
+#define ia64_mix4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 2)
+#define ia64_pack2_uss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 0)
+#define ia64_pack2_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 0)
+#define ia64_pack4_sss_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 0)
+#define ia64_unpack1_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 0, 1)
+#define ia64_unpack2_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 0, 1)
+#define ia64_unpack4_h_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 0, 1)
+#define ia64_unpack1_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 2, 1)
+#define ia64_unpack2_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 2, 1)
+#define ia64_unpack4_l_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 2, 2, 1)
+#define ia64_pmin1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 0)
+#define ia64_pmax1_u_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 1, 1)
+#define ia64_pmin2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 0)
+#define ia64_pmax2_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 2, 3, 1)
+#define ia64_psad1_pred(code, qp, r1, r2, r3) ia64_i2 ((code), (qp), (r1), (r2), (r3), 0, 0, 0, 2, 3, 2)
+
+typedef enum {
+ IA64_MUX1_BRCST = 0x0,
+ IA64_MUX1_MIX = 0x8,
+ IA64_MUX1_SHUF = 0x9,
+ IA64_MUX1_ALT = 0xa,
+ IA64_MUX1_REV = 0xb
+} Ia64Mux1Permutation;
+
+#define ia64_i3(code, qp, r1, r2, mbtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mbtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux1_pred(code, qp, r1, r2, mbtype) ia64_i3 ((code), (qp), (r1), (r2), (mbtype), 7, 0, 0, 0, 3, 2, 2)
+
+#define ia64_i4(code, qp, r1, r2, mhtype, opcode, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (mhtype), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (opcode), 37); } while (0)
+
+#define ia64_mux2_pred(code, qp, r1, r2, mhtype) ia64_i4 ((code), (qp), (r1), (r2), (mhtype), 7, 0, 1, 0, 3, 2, 2)
+
+#define ia64_i5(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 2, 0)
+#define ia64_pshr4_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 2, 0)
+#define ia64_shr_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 2, 0)
+#define ia64_pshr2_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 0)
+#define ia64_pshr4_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 0)
+#define ia64_shr_u_pred(code, qp, r1, r3, r2) ia64_i5 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 0)
+
+#define ia64_i6(code, qp, r1, count, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (count), 14, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshr2_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 3, 0)
+#define ia64_pshr4_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 3, 0)
+#define ia64_pshr2_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 0, 1, 0, 1, 1, 0)
+#define ia64_pshr4_u_imm_pred(code, qp, r1, r3, count) ia64_i6 ((code), (qp), (r1), (count), (r3), 1, 0, 0, 1, 1, 0)
+
+#define ia64_i7(code, qp, r1, r2, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 0, 1, 0, 0, 0, 1)
+#define ia64_pshl4_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 0, 0, 0, 0, 1)
+#define ia64_shl_pred(code, qp, r1, r2, r3) ia64_i7 ((code), (qp), (r1), (r2), (r3), 1, 1, 0, 0, 0, 1)
+
+#define ia64_i8(code, qp, r1, r2, count, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); check_count5 ((count)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, 31 - (count), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_pshl2_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 0, 1, 0, 3, 1, 1)
+#define ia64_pshl4_imm_pred(code, qp, r1, r2, count) ia64_i8 ((code), (qp), (r1), (r2), (count), 1, 0, 0, 3, 1, 1)
+
+#define ia64_i9(code, qp, r1, r3, za, zb, ve, x2a, x2b, x2c) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, 0, 13, (r3), 20, (x2b), 28, (x2c), 30, (ve), 32, (zb), 33, (x2a), 34, (za), 36, (7), 37); } while (0)
+
+#define ia64_popcnt_pred(code, qp, r1, r3) ia64_i9 ((code), (qp), (r1), (r3), 0, 1, 0, 1, 1, 2)
+
+#define ia64_i10(code, qp, r1, r2, r3, count, opcode, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_count6 ((count)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (count), 27, (x), 33, (x2), 34, (opcode), 37); } while (0)
+
+#define ia64_shrp_pred(code, qp, r1, r2, r3, count) ia64_i10 ((code), (qp), (r1), (r2), ( r3), (count), 5, 3, 0)
+
+#define ia64_i11(code, qp, r1, r3, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((pos) << 1) | (y), 13, (r3), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_extr_u_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 0)
+#define ia64_extr_pred(code, qp, r1, r3, pos, len) ia64_i11 ((code), (qp), (r1), (r3), (pos), (len), 1, 0, 1)
+
+#define ia64_i12(code, qp, r1, r2, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, (5), 37); } while (0)
+
+#define ia64_dep_z_pred(code, qp, r1, r2, pos, len) ia64_i12 ((code), (qp), (r1), (r2), (pos), (len), 1, 1, 0)
+
+#define ia64_i13(code, qp, r1, imm, pos, len, x2, x, y) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, ((guint64)(imm) & 0x7f), 13, (63 - (pos)) | ((y) << 6), 20, (len) - 1, 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_z_imm_pred(code, qp, r1, imm, pos, len) ia64_i13 ((code), (qp), (r1), (imm), (pos), (len), 1, 1, 1)
+
+#define ia64_i14(code, qp, r1, imm, r3, pos, len, x2, x) do { read_pr ((code), (qp)); write_gr ((code), (r1)); check_imm1 (imm); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (63 - (pos)) << 1, 13, (r3), 20, (len), 27, (x), 33, (x2), 34, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+#define ia64_dep_imm_pred(code, qp, r1, imm, r3, pos, len) ia64_i14 ((code), (qp), (r1), (imm), (r3), (pos), (len), 3, 1)
+
+#define ia64_i15(code, qp, r1, r2, r3, pos, len) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); check_len4 ((len)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (len) - 1, 27, (63 - (pos)), 31, (4), 37); } while (0)
+
+#define ia64_dep_pred(code, qp, r1, r2, r3, pos, len) ia64_i15 ((code), (qp), (r1), (r2), (r3), (pos), (len))
+
+#define ia64_i16(code, qp, p1, p2, r3, pos, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_11 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (pos), 14, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tbit_z_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 0)
+#define ia64_tbit_z_unc_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 0, 0, 1)
+#define ia64_tbit_z_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 0)
+#define ia64_tbit_nz_and_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 0, 1, 0, 1)
+#define ia64_tbit_z_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 0)
+#define ia64_tbit_nz_or_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 0, 0, 1)
+#define ia64_tbit_z_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 0)
+#define ia64_tbit_nz_or_andcm_pred(code, qp, p1, p2, r3, pos) ia64_i16 ((code), (qp), (p1), (p2), (r3), (pos), 0, 1, 1, 0, 1)
+
+#define ia64_i17(code, qp, p1, p2, r3, x2, ta, tb, y, c) do { read_pr ((code), (qp)); write_pr ((code), (p1)); write_pr ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_I, (qp), 0, (p1), 6, (c), 12, (y), 13, (r3), 20, (p2), 27, (ta), 33, (x2), 34, (tb), 36, (5), 37); } while (0)
+
+#define ia64_tnat_z_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 0)
+#define ia64_tnat_z_unc_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 0, 1, 1)
+#define ia64_tnat_z_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 0)
+#define ia64_tnat_nz_and_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 0, 1, 1, 1)
+#define ia64_tnat_z_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 0)
+#define ia64_tnat_nz_or_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 0, 1, 1)
+#define ia64_tnat_z_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 0)
+#define ia64_tnat_nz_or_andcm_pred(code, qp, p1, p2, r3) ia64_i17 ((code), (qp), (p1), (p2), (r3), 0, 1, 1, 1, 1)
+
+#define ia64_i18(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 0)
+#define ia64_hint_i_pred(code, qp, imm) ia64_i18 ((code), (qp), (imm), 0, 1, 1)
+
+#define ia64_i19(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x3), 33, ((imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_i_pred(code, qp, imm) ia64_i19 ((code), (qp), (imm), 0, 0)
+
+#define ia64_i20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); check_imm21 ((imm)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_s_i_pred(code, qp,r2,disp) ia64_i20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_i21(code, qp, b1, r2, tag13, x3, x, ih, wh) do { read_pr ((code), (qp)); check_imm8 (tag13); write_br ((code), (b1)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_I, (qp), 0, (b1), 6, (r2), 13, (wh), 20, (x), 22, (ih), 23, (tag13) & 0x1ff, 24, (x3), 33, (0), 37); } while (0)
+
+typedef enum {
+ IA64_MOV_TO_BR_WH_SPTK = 0,
+ IA64_MOV_TO_BR_WH_NONE = 1,
+ IA64_MOV_TO_BR_WH_DPTK = 2
+} Ia64MovToBrWhetherHint;
+
+typedef enum {
+ IA64_BR_IH_NONE = 0,
+ IA64_BR_IH_IMP = 1
+} Ia64BranchImportanceHint;
+
+#define ia64_mov_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 0, ih, wh)
+#define ia64_mov_ret_to_br_hint_pred(code, qp, b1, r2, disp, wh, ih) ia64_i21 ((code), (qp), (b1), (r2), (disp), 7, 1, ih, wh)
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br_pred(code, qp, b1, r2) ia64_mov_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+#define ia64_mov_ret_to_br_pred(code, qp, b1, r2) ia64_mov_ret_to_br_hint_pred ((code), (qp), (b1), (r2), 0, 0, 0)
+
+/* End of pseudo ops */
+
+/* I22 format: move from branch register into a GR (r1 = b2). */
+#define ia64_i22(code, qp, r1, b2, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_br ((code), (b2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (b2), 13, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+/* No trailing ';' here: ia64_i22 expands to a do { } while (0) block so the
+   caller supplies the semicolon; a stray one (as previously present) yields an
+   empty statement that breaks "if (c) ia64_mov_from_br_pred (...); else ...". */
+#define ia64_mov_from_br_pred(code, qp, r1, b2) ia64_i22 ((code), (qp), (r1), (b2), 0, 0x31)
+
+/* I23 format: move GR to predicate registers under a mask; mask split across
+   bits 6..12 and 24..31, sign bit at 36. */
+#define ia64_i23(code, qp, r2, mask, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (mask) & 0x7f, 6, (r2), 13, ((mask) >> 7) & 0xff, 24, (x3), 33, sign_bit ((mask)), 36, (0), 37); } while (0)
+
+/* mov pr = r2, mask.  The encoded mask excludes p0, hence the >> 1. */
+#define ia64_mov_to_pred_pred(code, qp, r2, mask) ia64_i23 ((code), (qp), (r2), (mask) >> 1, 3)
+
+/* I24 format: 27-bit immediate at bit 6, sign bit at 36. */
+#define ia64_i24(code, qp, imm, x3) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7ffffff, 6, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+/* mov pr.rot = imm.  The low 16 bits are dropped from the encoding (>> 16). */
+#define ia64_mov_to_pred_rot_imm_pred(code, qp,imm) ia64_i24 ((code), (qp), (imm) >> 16, 2)
+
+/* I25 format: one-destination move into a GR, source selected by x6. */
+#define ia64_i25(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+/* r1 = ip (x6 = 0x30) and r1 = pr (x6 = 0x33). */
+#define ia64_mov_from_ip_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x30)
+#define ia64_mov_from_pred_pred(code, qp, r1) ia64_i25 ((code), (qp), (r1), 0, 0x33)
+
+/* I26 format: move GR to application register (ar3 = r2). */
+#define ia64_i26(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_i_pred(code, qp, ar3, r2) ia64_i26 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+/* I27 format: move 8-bit immediate (check_imm8) to application register. */
+#define ia64_i27(code, qp, ar3, imm, x3, x6) do { read_pr ((code), (qp)); write_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_I, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x6), 27, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_i_pred(code, qp, ar3, imm) ia64_i27 ((code), (qp), (ar3), (imm), 0, 0x0a)
+
+/* I28 format: move application register to GR (r1 = ar3). */
+#define ia64_i28(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+#define ia64_mov_from_ar_i_pred(code, qp, r1, ar3) ia64_i28 ((code), (qp), (r1), (ar3), 0, 0x32)
+
+/* I29 format: single-source GR operations (r1 = op r3), operation in x6. */
+#define ia64_i29(code, qp, r1, r3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_I, (qp), 0, (r1), 6, (r3), 20, (x6), 27, (x3), 33, (0), 37); } while (0)
+
+/* Zero-extend (zxt), sign-extend (sxt) and compute-zero-index (czx) forms;
+   the numeric suffix is the operand width in bytes, selected via x6. */
+#define ia64_zxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x10)
+#define ia64_zxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x11)
+#define ia64_zxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x12)
+#define ia64_sxt1_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x14)
+#define ia64_sxt2_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x15)
+#define ia64_sxt4_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x16)
+#define ia64_czx1_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x18)
+#define ia64_czx2_l_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x19)
+#define ia64_czx1_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1C)
+#define ia64_czx2_r_pred(code, qp, r1, r3) ia64_i29 ((code), (qp), (r1), (r3), 0, 0x1D)
+
+/*
+ * M-Unit Instructions
+ */
+
+/* Locality hint values for the hint field of M-unit loads (ldN.hint). */
+typedef enum {
+	IA64_LD_HINT_NONE = 0,
+	IA64_LD_HINT_NT1 = 1,
+	IA64_LD_HINT_NTA = 3
+} Ia64LoadHint;
+
+/* Locality hint values for the hint field of M-unit stores (stN.hint). */
+typedef enum {
+	IA64_ST_HINT_NONE = 0,
+	IA64_ST_HINT_NTA = 3
+} Ia64StoreHint;
+
+/* M1 format: integer load, no base update (r1 = [r3]).  The load variant
+   (plain/speculative/advanced/biased/acquire/check) and the access size are
+   both encoded in x6; major opcode 4 at bit 37. */
+#define ia64_m1(code, qp, r1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+/* Plain loads: the numeric suffix is the access size in bytes (x6 0x00..0x03). */
+#define ia64_ld1_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x00)
+#define ia64_ld2_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ld4_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ld8_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x03)
+
+/* .s: speculative loads (x6 0x04..0x07). */
+#define ia64_ld1_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x04)
+#define ia64_ld2_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ld4_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ld8_s_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x07)
+
+/* .a: advanced loads (x6 0x08..0x0B). */
+#define ia64_ld1_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x08)
+#define ia64_ld2_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ld4_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ld8_a_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0B)
+
+/* .sa: speculative advanced loads (x6 0x0C..0x0F). */
+#define ia64_ld1_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0C)
+#define ia64_ld2_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ld4_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ld8_sa_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x0F)
+
+/* .bias: biased loads (x6 0x10..0x13). */
+#define ia64_ld1_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x10)
+#define ia64_ld2_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x11)
+#define ia64_ld4_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x12)
+#define ia64_ld8_bias_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x13)
+
+/* .acq: acquire-ordered loads (x6 0x14..0x17). */
+#define ia64_ld1_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x14)
+#define ia64_ld2_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x15)
+#define ia64_ld4_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x16)
+#define ia64_ld8_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x17)
+
+/* ld8.fill: restore a spilled 8-byte value (x6 0x1B). */
+#define ia64_ld8_fill_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x1B)
+
+/* .c.clr: check loads, clearing the ALAT entry (x6 0x20..0x23). */
+#define ia64_ld1_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x20)
+#define ia64_ld2_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ld4_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ld8_c_clr_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x23)
+
+/* .c.nc: check loads, no clear (x6 0x24..0x27). */
+#define ia64_ld1_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x24)
+#define ia64_ld2_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ld4_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ld8_c_nc_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x27)
+
+/* .c.clr.acq: check loads with clear and acquire semantics (x6 0x28..0x2B). */
+#define ia64_ld1_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x28)
+#define ia64_ld2_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x29)
+#define ia64_ld4_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 0, 0x2B)
+
+/* 16-byte loads (x = 1 form of M1).  FIXME: This writes AR.CSD, which is not
+   tracked by the dependency macros here. */
+/* No trailing ';': ia64_m1 is a do { } while (0) block, so the caller supplies
+   the semicolon; the stray one previously here produced an empty statement that
+   breaks if/else without braces. */
+#define ia64_ld16_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x28)
+#define ia64_ld16_acq_hint_pred(code, qp, r1, r3, hint) ia64_m1 ((code), (qp), (r1), (r3), (hint), 0, 1, 0x2C)
+
+/* M2 format: integer load with register post-increment (r1 = [r3], r3 += r2).
+   r2 is a source operand (encoded at bit 13) and r3 is both read and updated,
+   so all three registers are marked for the dependency checker -- mirroring
+   ia64_m5, the immediate-increment store form.  (The original omitted
+   read_gr on r2 and contained a stray empty statement.) */
+#define ia64_m2(code, qp, r1, r2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+/* M2-based loads with register post-increment.  Same x6 variant/size layout
+   as the M1 table above, with m = 1 selecting the register-update form. */
+#define ia64_ld1_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_hint_pred(code, qp, r1, r2, r3, hint) ia64_m2 ((code), (qp), (r1), (r2), (r3), (hint), 1, 0, 0x2B)
+
+/* M3 format: integer load with 9-bit immediate post-increment (check_imm9).
+   The immediate is split: low 7 bits at bit 13, bit 7 at bit 27, sign at 36;
+   r3 is read and written (base update); major opcode 5 at bit 37. */
+#define ia64_m3(code, qp, r1, r3, imm, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+/* Immediate-increment loads: same x6 variant/size layout as the M1 table. */
+#define ia64_ld1_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x00)
+#define ia64_ld2_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x01)
+#define ia64_ld4_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x02)
+#define ia64_ld8_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x03)
+
+#define ia64_ld1_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x04)
+#define ia64_ld2_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x05)
+#define ia64_ld4_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x06)
+#define ia64_ld8_s_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x07)
+
+#define ia64_ld1_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x08)
+#define ia64_ld2_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x09)
+#define ia64_ld4_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0A)
+#define ia64_ld8_a_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0B)
+
+#define ia64_ld1_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0C)
+#define ia64_ld2_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0D)
+#define ia64_ld4_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0E)
+#define ia64_ld8_sa_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x0F)
+
+#define ia64_ld1_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x10)
+#define ia64_ld2_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x11)
+#define ia64_ld4_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x12)
+#define ia64_ld8_bias_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x13)
+
+#define ia64_ld1_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x14)
+#define ia64_ld2_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x15)
+#define ia64_ld4_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x16)
+#define ia64_ld8_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x17)
+
+#define ia64_ld8_fill_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x1B)
+
+#define ia64_ld1_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x20)
+#define ia64_ld2_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x21)
+#define ia64_ld4_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x22)
+#define ia64_ld8_c_clr_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x23)
+
+#define ia64_ld1_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x24)
+#define ia64_ld2_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x25)
+#define ia64_ld4_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x26)
+#define ia64_ld8_c_nc_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x27)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x28)
+#define ia64_ld2_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x29)
+#define ia64_ld4_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2A)
+#define ia64_ld8_c_clr_acq_inc_imm_hint_pred(code, qp, r1, r3, imm, hint) ia64_m3 ((code), (qp), (r1), (r3), (imm), (hint), 1, 0, 0x2B)
+
+/* Pseudo ops */
+
+/* Each pseudo op below expands the corresponding *_hint_pred macro with
+   hint = 0 (IA64_LD_HINT_NONE). */
+
+#define ia64_ld1_pred(code, qp, r1, r3) ia64_ld1_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_pred(code, qp, r1, r3) ia64_ld2_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_pred(code, qp, r1, r3) ia64_ld4_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_pred(code, qp, r1, r3) ia64_ld8_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_s_pred(code, qp, r1, r3) ia64_ld1_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_s_pred(code, qp, r1, r3) ia64_ld2_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_s_pred(code, qp, r1, r3) ia64_ld4_s_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_s_pred(code, qp, r1, r3) ia64_ld8_s_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_a_pred(code, qp, r1, r3) ia64_ld1_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_a_pred(code, qp, r1, r3) ia64_ld2_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_a_pred(code, qp, r1, r3) ia64_ld4_a_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_a_pred(code, qp, r1, r3) ia64_ld8_a_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_sa_pred(code, qp, r1, r3) ia64_ld1_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_sa_pred(code, qp, r1, r3) ia64_ld2_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_sa_pred(code, qp, r1, r3) ia64_ld4_sa_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_sa_pred(code, qp, r1, r3) ia64_ld8_sa_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_bias_pred(code, qp, r1, r3) ia64_ld1_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_bias_pred(code, qp, r1, r3) ia64_ld2_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_bias_pred(code, qp, r1, r3) ia64_ld4_bias_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_bias_pred(code, qp, r1, r3) ia64_ld8_bias_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_acq_pred(code, qp, r1, r3) ia64_ld1_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_acq_pred(code, qp, r1, r3) ia64_ld2_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_acq_pred(code, qp, r1, r3) ia64_ld4_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_acq_pred(code, qp, r1, r3) ia64_ld8_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld8_fill_pred(code, qp, r1, r3) ia64_ld8_fill_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_pred(code, qp, r1, r3) ia64_ld1_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_pred(code, qp, r1, r3) ia64_ld2_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_pred(code, qp, r1, r3) ia64_ld4_c_clr_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_pred(code, qp, r1, r3) ia64_ld8_c_clr_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_nc_pred(code, qp, r1, r3) ia64_ld1_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_nc_pred(code, qp, r1, r3) ia64_ld2_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_nc_pred(code, qp, r1, r3) ia64_ld4_c_nc_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_nc_pred(code, qp, r1, r3) ia64_ld8_c_nc_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq_pred(code, qp, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld2_c_clr_acq_pred(code, qp, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld4_c_clr_acq_pred(code, qp, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld8_c_clr_acq_pred(code, qp, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, qp, r1, r3, 0)
+
+#define ia64_ld16_pred(code, qp, r1, r3) ia64_ld16_hint_pred (code, qp, r1, r3, 0)
+#define ia64_ld16_acq_pred(code, qp, r1, r3) ia64_ld16_acq_hint_pred (code, qp, r1, r3, 0)
+
+/* Register post-increment forms (M2), hint = 0. */
+#define ia64_ld1_inc_pred(code, qp, r1, r2, r3) ia64_ld1_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_inc_pred(code, qp, r1, r2, r3) ia64_ld2_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_inc_pred(code, qp, r1, r2, r3) ia64_ld4_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_inc_pred(code, qp, r1, r2, r3) ia64_ld8_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc_pred(code, qp, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_s_inc_pred(code, qp, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_s_inc_pred(code, qp, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_s_inc_pred(code, qp, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc_pred(code, qp, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_a_inc_pred(code, qp, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_a_inc_pred(code, qp, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_a_inc_pred(code, qp, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc_pred(code, qp, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc_pred(code, qp, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc_pred(code, qp, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc_pred(code, qp, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, qp, r1, r2, r3, 0)
+
+/* Immediate post-increment forms (M3), hint = 0. */
+#define ia64_ld1_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm_pred(code, qp, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, qp, r1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+/* M4 format: integer store, no base update ([r3] = r2); both registers are
+   sources; store variant and access size encoded in x6, major opcode 4. */
+#define ia64_m4(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+/* Plain stores: numeric suffix is the access size in bytes (x6 0x30..0x33). */
+#define ia64_st1_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x30)
+#define ia64_st2_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x31)
+#define ia64_st4_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x32)
+#define ia64_st8_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x33)
+
+/* Pseudo ops */
+
+/* hint = 0 (IA64_ST_HINT_NONE) convenience wrappers.  st1/st2/st4 added for
+   consistency with the load pseudo-op section, which covers every size. */
+#define ia64_st1_pred(code, qp, r3, r2) ia64_st1_hint_pred ((code), (qp), (r3), (r2), 0)
+#define ia64_st2_pred(code, qp, r3, r2) ia64_st2_hint_pred ((code), (qp), (r3), (r2), 0)
+#define ia64_st4_pred(code, qp, r3, r2) ia64_st4_hint_pred ((code), (qp), (r3), (r2), 0)
+#define ia64_st8_pred(code, qp, r3, r2) ia64_st8_hint_pred ((code), (qp), (r3), (r2), 0)
+
+/* End of pseudo ops */
+
+/* .rel: release-ordered stores (x6 0x34..0x37). */
+#define ia64_st1_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x37)
+
+/* st8.spill (x6 0x3B). */
+#define ia64_st8_spill_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 0, 0x3B)
+
+/* 16-byte stores: x = 1 forms of M4. */
+#define ia64_st16_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x30)
+#define ia64_st16_rel_hint_pred(code, qp, r3, r2, hint) ia64_m4 ((code), (qp), (r3), (r2), (hint), 0, 1, 0x34)
+
+/* M5 format: integer store with 9-bit immediate post-increment (check_imm9).
+   r3 is read and written (base update), r2 is a source; immediate split as
+   low 7 bits at bit 6, bit 7 at bit 27, sign at 36; major opcode 5. */
+#define ia64_m5(code, qp, r3, r2, imm, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (5), 37); } while (0)
+
+/* Immediate-increment stores; x6 layout matches the M4 table. */
+#define ia64_st1_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x30)
+#define ia64_st2_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x31)
+#define ia64_st4_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x32)
+#define ia64_st8_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x33)
+
+#define ia64_st1_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x34)
+#define ia64_st2_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x35)
+#define ia64_st4_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x36)
+#define ia64_st8_rel_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x37)
+
+#define ia64_st8_spill_inc_imm_hint_pred(code, qp, r3, r2, imm, hint) ia64_m5 ((code), (qp), (r3), (r2), (imm), (hint), 0, 0, 0x3B)
+
+/*
+ * M6-format encoder: floating-point load, f1 = [r3], no base update.
+ * Fields: f1 at bit 6, r3 at 20, x at 27, hint at 28, x6 at 30, m at 36,
+ * major opcode 6 at 37.  The x6 groups below select the completer:
+ * 0x00-0x03 plain, 0x04-0x07 _s, 0x08-0x0B _a, 0x0C-0x0F _sa,
+ * 0x20-0x23 _c_clr, 0x24-0x27 _c_nc, 0x1B ldf.fill.
+ */
+#define ia64_m6(code, qp, f1, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+/* Within each group the low bits pick the FP type: ldfe 0, ldf8 1, ldfs 2, ldfd 3. */
+#define ia64_ldfs_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x02)
+#define ia64_ldfd_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x03)
+#define ia64_ldf8_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x01)
+#define ia64_ldfe_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x00)
+
+#define ia64_ldfs_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x06)
+#define ia64_ldfd_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x07)
+#define ia64_ldf8_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x05)
+#define ia64_ldfe_s_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x04)
+
+#define ia64_ldfs_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0A)
+#define ia64_ldfd_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0B)
+#define ia64_ldf8_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x09)
+#define ia64_ldfe_a_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x08)
+
+#define ia64_ldfs_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0E)
+#define ia64_ldfd_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0F)
+#define ia64_ldf8_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0D)
+#define ia64_ldfe_sa_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x22)
+#define ia64_ldfd_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x23)
+#define ia64_ldf8_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x21)
+#define ia64_ldfe_c_clr_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x20)
+
+#define ia64_ldfs_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x26)
+#define ia64_ldfd_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x27)
+#define ia64_ldf8_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x25)
+#define ia64_ldfe_c_nc_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x24)
+
+#define ia64_ldf_fill_hint_pred(code, qp, f1, r3, hint) ia64_m6 ((code), (qp), (f1), (r3), (hint), 0, 0, 0x1B)
+
+/*
+ * M7-format encoder: floating-point load with register base update —
+ * r3 is read and written back (incremented by r2, read at bit 13).
+ * Same x6 completer groups as the M6 loads above; all callers pass
+ * m = 1 to select the update form.  Major opcode 6.
+ */
+#define ia64_m7(code, qp, f1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfs_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x02)
+#define ia64_ldfd_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x03)
+#define ia64_ldf8_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x01)
+#define ia64_ldfe_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x00)
+
+#define ia64_ldfs_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x06)
+#define ia64_ldfd_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x07)
+#define ia64_ldf8_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x05)
+#define ia64_ldfe_s_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x04)
+
+#define ia64_ldfs_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0A)
+#define ia64_ldfd_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0B)
+#define ia64_ldf8_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x09)
+#define ia64_ldfe_a_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x08)
+
+#define ia64_ldfs_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0E)
+#define ia64_ldfd_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0F)
+#define ia64_ldf8_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0D)
+#define ia64_ldfe_sa_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x0C)
+
+#define ia64_ldfs_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x22)
+#define ia64_ldfd_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x23)
+#define ia64_ldf8_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x21)
+#define ia64_ldfe_c_clr_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x20)
+
+#define ia64_ldfs_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x26)
+#define ia64_ldfd_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x27)
+#define ia64_ldf8_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x25)
+#define ia64_ldfe_c_nc_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x24)
+
+#define ia64_ldf_fill_inc_hint_pred(code, qp, f1, r3, r2, hint) ia64_m7 ((code), (qp), (f1), (r3), (r2), (hint), 1, 0, 0x1B)
+
+/*
+ * M8-format encoder: floating-point load with base update by a signed
+ * 9-bit immediate (check_imm9); r3 is read and written back.  Immediate
+ * split: low 7 bits at bit 13, bit 7 at 27, sign bit at 36.  Same x6
+ * completer groups as M6/M7.  Major opcode 7 (no m/x fields here).
+ */
+#define ia64_m8(code, qp, f1, r3, imm, hint, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_ldfs_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x02)
+#define ia64_ldfd_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x03)
+#define ia64_ldf8_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x01)
+#define ia64_ldfe_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x00)
+
+#define ia64_ldfs_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x06)
+#define ia64_ldfd_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x07)
+#define ia64_ldf8_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x05)
+#define ia64_ldfe_s_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x04)
+
+#define ia64_ldfs_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0A)
+#define ia64_ldfd_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0B)
+#define ia64_ldf8_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x09)
+#define ia64_ldfe_a_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x08)
+
+#define ia64_ldfs_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0E)
+#define ia64_ldfd_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0F)
+#define ia64_ldf8_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0D)
+#define ia64_ldfe_sa_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x0C)
+
+#define ia64_ldfs_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x22)
+#define ia64_ldfd_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x23)
+#define ia64_ldf8_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x21)
+#define ia64_ldfe_c_clr_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x20)
+
+#define ia64_ldfs_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x26)
+#define ia64_ldfd_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x27)
+#define ia64_ldf8_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x25)
+#define ia64_ldfe_c_nc_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x24)
+
+#define ia64_ldf_fill_inc_imm_hint_pred(code, qp, f1, r3, imm, hint) ia64_m8 ((code), (qp), (f1), (r3), (imm), (hint), 0x1B)
+
+/* Pseudo ops */
+
+/*
+ * Hint-less convenience forms for the FP loads above: each simply
+ * forwards to the corresponding *_hint_pred encoder with hint = 0.
+ */
+#define ia64_ldfs_pred(code, qp, f1, r3) ia64_ldfs_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_pred(code, qp, f1, r3) ia64_ldfd_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_pred(code, qp, f1, r3) ia64_ldf8_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_pred(code, qp, f1, r3) ia64_ldfe_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_s_pred(code, qp, f1, r3) ia64_ldfs_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_s_pred(code, qp, f1, r3) ia64_ldfd_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_s_pred(code, qp, f1, r3) ia64_ldf8_s_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_s_pred(code, qp, f1, r3) ia64_ldfe_s_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_a_pred(code, qp, f1, r3) ia64_ldfs_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_a_pred(code, qp, f1, r3) ia64_ldfd_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_a_pred(code, qp, f1, r3) ia64_ldf8_a_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_a_pred(code, qp, f1, r3) ia64_ldfe_a_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_sa_pred(code, qp, f1, r3) ia64_ldfs_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_sa_pred(code, qp, f1, r3) ia64_ldfd_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_sa_pred(code, qp, f1, r3) ia64_ldf8_sa_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_sa_pred(code, qp, f1, r3) ia64_ldfe_sa_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_clr_pred(code, qp, f1, r3) ia64_ldfs_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_clr_pred(code, qp, f1, r3) ia64_ldfd_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_clr_pred(code, qp, f1, r3) ia64_ldf8_c_clr_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_clr_pred(code, qp, f1, r3) ia64_ldfe_c_clr_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_c_nc_pred(code, qp, f1, r3) ia64_ldfs_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfd_c_nc_pred(code, qp, f1, r3) ia64_ldfd_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldf8_c_nc_pred(code, qp, f1, r3) ia64_ldf8_c_nc_hint_pred (code, qp, f1, r3, 0)
+#define ia64_ldfe_c_nc_pred(code, qp, f1, r3) ia64_ldfe_c_nc_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldf_fill_pred(code, qp, f1, r3) ia64_ldf_fill_hint_pred (code, qp, f1, r3, 0)
+
+#define ia64_ldfs_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_s_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_s_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_s_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_a_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_a_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_a_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_sa_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_sa_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_clr_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_clr_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfs_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfd_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfd_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldf8_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldf8_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+#define ia64_ldfe_c_nc_inc_pred(code, qp, f1, r3, r2) ia64_ldfe_c_nc_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldf_fill_inc_pred(code, qp, f1, r3, r2) ia64_ldf_fill_inc_hint_pred (code, qp, f1, r3, r2, 0)
+
+#define ia64_ldfs_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_s_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_s_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_a_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_a_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_sa_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_sa_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_clr_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldfs_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfd_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldf8_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+#define ia64_ldfe_c_nc_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+#define ia64_ldf_fill_inc_imm_pred(code, qp, f1, r3, imm) ia64_ldf_fill_inc_imm_hint_pred (code, qp, f1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+/*
+ * M9-format encoder: floating-point store, [r3] = f2, no base update.
+ * Fields: f2 at bit 13, r3 at 20, x at 27, hint at 28, x6 at 30, m at
+ * 36, major opcode 6 at 37.  x6 selects the type (stfe 0x30, stf8 0x31,
+ * stfs 0x32, stfd 0x33) or stf.spill (0x3B).
+ */
+#define ia64_m9(code, qp, r3, f2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_stfs_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x32)
+#define ia64_stfd_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x33)
+#define ia64_stf8_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x31)
+#define ia64_stfe_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x30)
+#define ia64_stf_spill_hint_pred(code, qp, r3, f2, hint) ia64_m9 ((code), (qp), (r3), (f2), (hint), 0, 0, 0x3B)
+
+/*
+ * M10-format encoder: floating-point store with base update by a signed
+ * 9-bit immediate (check_imm9); r3 is read and written back.  Immediate
+ * split: low 7 bits at bit 6, bit 7 at 27, sign bit at 36.  Same x6
+ * selectors as M9.  Major opcode 7 (no m/x fields here).
+ */
+#define ia64_m10(code, qp, r3, f2, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_fr ((code), (f2)); check_imm9 ((imm)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_stfs_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x32)
+#define ia64_stfd_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x33)
+#define ia64_stf8_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x31)
+#define ia64_stfe_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x30)
+#define ia64_stf_spill_inc_imm_hint_pred(code, qp, r3, f2, imm, hint) ia64_m10 ((code), (qp), (r3), (f2), (imm), (hint), 0x3B)
+
+/*
+ * M11-format encoder: floating-point load pair — writes BOTH f1 and f2
+ * from [r3]; no base update.  All callers pass x = 1 to distinguish the
+ * pair forms from the single M6 loads; x6 completer groups match M6.
+ * Major opcode 6.
+ */
+#define ia64_m11(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x02)
+#define ia64_ldfpd_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x03)
+#define ia64_ldfp8_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x01)
+
+#define ia64_ldfps_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x06)
+#define ia64_ldfpd_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x07)
+#define ia64_ldfp8_s_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x05)
+
+#define ia64_ldfps_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0A)
+#define ia64_ldfpd_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0B)
+#define ia64_ldfp8_a_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x09)
+
+#define ia64_ldfps_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0E)
+#define ia64_ldfpd_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0F)
+#define ia64_ldfp8_sa_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x22)
+#define ia64_ldfpd_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x23)
+#define ia64_ldfp8_c_clr_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x21)
+
+#define ia64_ldfps_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x26)
+#define ia64_ldfpd_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x27)
+#define ia64_ldfp8_c_nc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m11 ((code), (qp), (f1), (f2), (r3), (hint), 0, 1, 0x25)
+
+/*
+ * M12-format encoder: floating-point load pair with base update — like
+ * M11 but r3 is also written back (callers pass m = 1, x = 1).  There
+ * is no explicit increment operand in this format; the update amount is
+ * implied by the instruction (not re-derivable from this macro alone).
+ */
+#define ia64_m12(code, qp, f1, f2, r3, hint, m, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_fr ((code), (f2)); read_gr ((code), (r3)); write_gr ((code), (r3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (f2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_ldfps_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x02)
+#define ia64_ldfpd_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x03)
+#define ia64_ldfp8_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x01)
+
+#define ia64_ldfps_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x06)
+#define ia64_ldfpd_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x07)
+#define ia64_ldfp8_s_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x05)
+
+#define ia64_ldfps_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0A)
+#define ia64_ldfpd_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0B)
+#define ia64_ldfp8_a_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x09)
+
+#define ia64_ldfps_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0E)
+#define ia64_ldfpd_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0F)
+#define ia64_ldfp8_sa_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x0D)
+
+#define ia64_ldfps_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x22)
+#define ia64_ldfpd_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x23)
+#define ia64_ldfp8_c_clr_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x21)
+
+#define ia64_ldfps_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x26)
+#define ia64_ldfpd_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x27)
+#define ia64_ldfp8_c_nc_inc_hint_pred(code, qp, f1, f2, r3, hint) ia64_m12 ((code), (qp), (f1), (f2), (r3), (hint), 1, 1, 0x25)
+
+/*
+ * 2-bit locality-hint values for the line-prefetch (lfetch) encoders
+ * below; passed as the `hint` argument of ia64_m13/m14/m15.
+ */
+typedef enum {
+ IA64_LFHINT_NONE = 0,
+ IA64_LFHINT_NT1 = 1,
+ IA64_LFHINT_NT2 = 2,
+ IA64_LFHINT_NTA = 3
+} Ia64LinePrefetchHint;
+
+/*
+ * M13-format encoder: lfetch (line prefetch) from [r3], no base update.
+ * x6 0x2C..0x2F select plain / .excl / .fault / .fault.excl.
+ * Major opcode 6.
+ */
+#define ia64_m13(code, qp, r3, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2C)
+#define ia64_lfetch_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2D)
+#define ia64_lfetch_fault_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2E)
+#define ia64_lfetch_fault_excl_hint_pred(code, qp, r3, hint) ia64_m13 ((code), (qp), (r3), (hint), 0, 0, 0x2F)
+
+/* M14: lfetch with register base update (r3 read and written, r2 at bit 13; m = 1). */
+#define ia64_m14(code, qp, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_lfetch_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2C)
+#define ia64_lfetch_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2D)
+#define ia64_lfetch_fault_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2E)
+#define ia64_lfetch_fault_excl_inc_hint_pred(code, qp, r3, r2, hint) ia64_m14 ((code), (qp), (r3), (r2), (hint), 1, 0, 0x2F)
+
+/* M15: lfetch with 9-bit immediate base update (imm split 7/1/sign as in M5/M8; major opcode 7). */
+#define ia64_m15(code, qp, r3, imm, hint, x6) do { read_pr ((code), (qp)); read_gr ((code), (r3)); write_gr ((code), (r3)); check_imm9 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (r3), 20, ((imm) >> 7) & 0x1, 27, (hint), 28, (x6), 30, sign_bit ((imm)), 36, (7), 37); } while (0)
+
+#define ia64_lfetch_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2C)
+#define ia64_lfetch_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2D)
+#define ia64_lfetch_fault_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2E)
+#define ia64_lfetch_fault_excl_inc_imm_hint_pred(code, qp, r3, imm, hint) ia64_m15 ((code), (qp), (r3), (imm), (hint), 0x2F)
+
+/*
+ * M16-format encoder: atomic compare-exchange / exchange.  r1 receives
+ * the old memory value (write_gr), [r3] is the location, r2 the new
+ * value.  Fields: r1 at 6, r2 at 13, r3 at 20; major opcode 4, x = 1.
+ */
+#define ia64_m16(code, qp, r1, r3, r2, hint, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); read_gr ((code), (r2)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (r2), 13, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+/* cmpxchg{1,2,4,8}: x6 0x00-0x03 acquire, 0x04-0x07 release; 16-byte at 0x20/0x24; xchg at 0x08-0x0B. */
+#define ia64_cmpxchg1_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x00)
+#define ia64_cmpxchg2_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x01)
+#define ia64_cmpxchg4_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x02)
+#define ia64_cmpxchg8_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x03)
+#define ia64_cmpxchg1_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x04)
+#define ia64_cmpxchg2_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x05)
+#define ia64_cmpxchg4_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x06)
+#define ia64_cmpxchg8_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x07)
+#define ia64_cmpxchg16_acq_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x20)
+#define ia64_cmpxchg16_rel_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x24)
+#define ia64_xchg1_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x08)
+#define ia64_xchg2_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x09)
+#define ia64_xchg4_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0A)
+#define ia64_xchg8_hint_pred(code, qp, r1, r3, r2, hint) ia64_m16 ((code), (qp), (r1), (r3), (r2), (hint), 0, 1, 0x0B)
+
+/* 2-bit encoding of the fetchadd increment magnitude: 16->0, 8->1, 4->2, 1->3. */
+#define encode_inc3(inc3) ((inc3) == 16 ? 0 : ((inc3) == 8 ? 1 : ((inc3) == 4 ? 2 : 3)))
+
+/*
+ * M17-format encoder: fetchadd.  The increment must be +/-1, 4, 8 or 16
+ * (enforced by check_assert on the absolute value); magnitude goes at
+ * bit 13 via encode_inc3, sign at bit 15.  Major opcode 4.
+ */
+#define ia64_m17(code, qp, r1, r3, imm, hint, m, x, x6) do { int aimm; read_pr ((code), (qp)); write_gr ((code), (r1)); read_gr ((code), (r3)); aimm = (imm) < 0 ? - (imm) : (imm); check_assert ((aimm) == 16 || (aimm) == 8 || (aimm) == 4 || (aimm) == 1); ia64_emit_ins_10 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, encode_inc3 (aimm), 13, sign_bit ((imm)), 15, (r3), 20, (x), 27, (hint), 28, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_fetchadd4_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x12)
+#define ia64_fetchadd8_acq_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x13)
+#define ia64_fetchadd4_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x16)
+#define ia64_fetchadd8_rel_hint_pred(code, qp, r1, r3, inc, hint) ia64_m17 ((code), (qp), (r1), (r3), (inc), (hint), 0, 1, 0x17)
+
+/*
+ * M18-format encoder: setf — transfer a general register (r2, read) into
+ * a floating-point register (f1, written).  Major opcode 6, x = 1;
+ * x6 selects the conversion (sig/exp/s/d at 0x1C..0x1F).
+ */
+#define ia64_m18(code, qp, f1, r2, m, x, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_fr ((code), (f1)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (r2), 13, (x), 27, (x6), 30, (m), 36, (6), 37); } while (0)
+
+#define ia64_setf_sig_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1C)
+#define ia64_setf_exp_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1D)
+#define ia64_setf_s_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1E)
+#define ia64_setf_d_pred(code, qp, f1, r2) ia64_m18 ((code), (qp), (f1), (r2), 0, 1, 0x1F)
+
+/*
+ * M19-format encoder: getf — the reverse transfer, floating-point (f2,
+ * read) into general register (r1, written).  Major opcode 4, same x6
+ * selectors as setf.
+ */
+#define ia64_m19(code, qp, r1, f2, m, x, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (f2), 13, (x), 27, (x6), 30, (m), 36, (4), 37); } while (0)
+
+#define ia64_getf_sig_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1C)
+#define ia64_getf_exp_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1D)
+#define ia64_getf_s_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1E)
+#define ia64_getf_d_pred(code, qp, r1, f2) ia64_m19 ((code), (qp), (r1), (f2), 0, 1, 0x1F)
+
+#define ia64_m20(code, qp, r2, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (r2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_m_pred(code, qp,r2,disp) ia64_m20 ((code), (qp), (r2), (disp), 1)
+
+#define ia64_m21(code, qp, f2, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f2)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 6, (f2), 13, ((imm) >> 7) & 0x1fff, 20, (x3), 33, sign_bit ((imm)), 36, (1), 37); } while (0)
+
+#define ia64_chk_s_float_m_pred(code, qp,f2,disp) ia64_m21 ((code), (qp), (f2), (disp), 3)
+
+#define ia64_m22(code, qp, r1, imm, x3) do { read_pr ((code), (qp)); read_gr ((code), (r1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 4)
+#define ia64_chk_a_clr_pred(code, qp,r1,disp) ia64_m22 ((code), (qp), (r1), (disp), 5)
+
+#define ia64_m23(code, qp, f1, imm, x3) do { read_pr ((code), (qp)); read_fr ((code), (f1)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (imm) & 0xfffff, 13, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_chk_a_nc_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 6)
+#define ia64_chk_a_clr_float_pred(code, qp,f1,disp) ia64_m23 ((code), (qp), (f1), (disp), 7)
+
+/* M24: operand-less system/memory-management ops, selected by (x3, x4, x2). */
+#define ia64_m24(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 1)
+#define ia64_fwb_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 2)
+#define ia64_mf_pred(code, qp) ia64_m24 ((code), (qp), 0, 2, 2)
+#define ia64_mf_a_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 2)
+#define ia64_srlz_d_pred(code, qp) ia64_m24 ((code), (qp), 0, 0, 3)
+/* srlz.i (instruction serialize): the original name "stlz" was a typo; the
+   misspelled macro is kept below as an alias for source compatibility. */
+#define ia64_srlz_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 1, 3)
+#define ia64_stlz_i_pred(code, qp) ia64_srlz_i_pred ((code), (qp))
+#define ia64_sync_i_pred(code, qp) ia64_m24 ((code), (qp), 0, 3, 3)
+
+/* M25: flushrs/loadrs (RSE control).  The expansion is textually identical
+   to ia64_m24, but flushrs/loadrs are M25-format instructions, so they are
+   routed through ia64_m25 (the original dispatched them to ia64_m24,
+   leaving ia64_m25 defined but unused; also normalized a stray uppercase
+   "0XA" hex literal). */
+#define ia64_m25(code, qp, x3, x4, x2) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_flushrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xC, 0)
+#define ia64_loadrs_pred(code, qp) ia64_m25 ((code), (qp), 0, 0xA, 0)
+
+/* M26/M27: invala.e for a general (M26) or floating-point (M27) register. */
+#define ia64_m26(code, qp, r1, x3, x4, x2) do { read_pr ((code), (qp)); read_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+#define ia64_invala_e_pred(code, qp, r1) ia64_m26 ((code), (qp), (r1), 0, 2, 1)
+
+/* Fixed: the original passed six field/position pairs to ia64_emit_ins_5;
+   it must use ia64_emit_ins_6 (compare ia64_m26, which emits the same six
+   pairs). */
+#define ia64_m27(code, qp, f1, x3, x4, x2) do { read_pr ((code), (qp)); read_fr ((code), (f1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (f1), 6, (x4), 27, (x2), 31, (x3), 33, (0), 37); } while (0)
+
+/* Fixed: the float variant must go through ia64_m27 (which does read_fr);
+   the original called ia64_m26, applying read_gr to a float register. */
+#define ia64_invala_e_float_pred(code, qp, f1) ia64_m27 ((code), (qp), (f1), 0, 3, 1)
+
+/* M28-M37 and M48: cache control, moves to/from application registers (ar),
+   control registers (cr) and the PSR, register-stack alloc, break and nop. */
+#define ia64_m28(code, qp, r3, x3, x6, x) do { read_pr ((code), (qp)); read_gr ((code), (r3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r3), 20, (x6), 27, (x3), 33, (x), 36, (1), 37); } while (0)
+
+#define ia64_fc_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 0)
+#define ia64_fc_i_pred(code, qp, r3) ia64_m28 ((code), (qp), (r3), 0, 0x30, 1)
+
+#define ia64_m29(code, qp, ar3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); write_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_ar_m_pred(code, qp, ar3, r2) ia64_m29 ((code), (qp), (ar3), (r2), 0, 0x2a)
+
+/* M30: mov to ar with 8-bit signed immediate (magnitude at bit 13, sign at 36). */
+#define ia64_m30(code, qp, ar3, imm, x3, x4, x2) do { read_pr ((code), (qp)); read_ar ((code), (ar3)); check_imm8 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0x7f, 13, (ar3), 20, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_mov_to_ar_imm_m_pred(code, qp, ar3, imm) ia64_m30 ((code), (qp), (ar3), (imm), 0, 8, 2)
+
+#define ia64_m31(code, qp, r1, ar3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); read_ar ((code), (ar3)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (ar3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_ar_m_pred(code, qp, r1, ar3) ia64_m31 ((code), (qp), (r1), (ar3), 0, 0x22)
+
+#define ia64_m32(code, qp, cr3, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_cr_pred(code, qp, cr3, r2) ia64_m32 ((code), (qp), (cr3), (r2), 0, 0x2C)
+
+#define ia64_m33(code, qp, r1, cr3, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (cr3), 20, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_cr_pred(code, qp, r1, cr3) ia64_m33 ((code), (qp), (r1), (cr3), 0, 0x24)
+
+/* M34: alloc.  Must be the first instruction of a bundle (nins == 0 after
+   ia64_begin_bundle) and cannot be predicated (qp == 0). */
+#define ia64_m34(code, qp, r1, sor, sol, sof, x3) do { ia64_begin_bundle ((code)); read_pr ((code), (qp)); write_gr ((code), (r1)); check_assert ((guint64)(sor) <= 0xf); check_assert ((guint64)(sol) <= 0x7f); check_assert ((guint64)(sof) <= 96); check_assert ((code).nins == 0); check_assert ((qp) == 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (sof), 13, (sol), 20, (sor), 27, (x3), 33, (1), 37); } while (0)
+
+/* alloc with (input, local, output, rotating) counts: sof = i+l+o,
+   sol = i+l, sor = r/8 (r must be a multiple of 8 and fit in the frame). */
+#define ia64_alloc_pred(code, qp, r1, i, l, o, r) do { read_pr ((code), (qp)); check_assert (((r) % 8) == 0); check_assert ((r) <= (i) + (l) + (o)); ia64_m34 ((code), (qp), (r1), (r) >> 3, (i) + (l), (i) + (l) + (o), 6); } while (0)
+
+#define ia64_m35(code, qp, r2, x3, x6) do { read_pr ((code), (qp)); read_gr ((code), (r2)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r2), 13, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_to_psr_l_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x2D)
+#define ia64_mov_to_psr_um_pred(code, qp, r2) ia64_m35 ((code), (qp), (r2), 0, 0x29)
+
+#define ia64_m36(code, qp, r1, x3, x6) do { read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_M, (qp), 0, (r1), 6, (x6), 27, (x3), 33, (1), 37); } while (0)
+
+#define ia64_mov_from_psr_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x25)
+#define ia64_mov_from_psr_um_pred(code, qp, r1) ia64_m36 ((code), (qp), (r1), 0, 0x21)
+
+#define ia64_m37(code, qp, imm, x3, x2, x4) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_break_m_pred(code, qp, imm) ia64_m37 ((code), (qp), (imm), 0, 0, 0)
+
+/* The System/Memory Management instruction encodings (M38-M47) are missing */
+
+#define ia64_m48(code, qp, imm, x3, x4, x2, y) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_M, (qp), 0, (imm) & 0xfffff, 6, (y), 26, (x4), 27, (x2), 31, (x3), 33, sign_bit ((imm)), 36, (0), 37); } while (0)
+
+#define ia64_nop_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 0)
+#define ia64_hint_m_pred(code, qp, imm) ia64_m48 ((code), (qp), (imm), 0, 1, 0, 1)
+
+/* Branch whether-hint (bwh field): static/dynamic taken/not-taken. */
+typedef enum {
+	IA64_BWH_SPTK  = 0,
+	IA64_BWH_SPNT  = 1,
+	IA64_BWH_DPTK  = 2,
+	IA64_BWH_DPNT  = 3
+} Ia64BranchWhetherHint;
+
+/* Sequential prefetch hint (ph field). */
+typedef enum {
+	IA64_PH_FEW  = 0,
+	IA64_PH_MANY = 1
+} Ia64SeqPrefetchHint;
+
+/* Branch-cache deallocation hint (dh field). */
+typedef enum {
+	IA64_DH_NONE = 0,
+	IA64_DH_CLR  = 1
+} Ia64BranchCacheDeallocHint;
+
+/* B-unit instruction formats B1-B5, B8, B9 (branches, branch-register calls,
+   cover/rfi-style ops, break/nop).  The 21-bit IP-relative displacement is
+   emitted as imm20b (bits 13..32) plus the sign bit at 36. */
+#define ia64_b1(code, qp, imm, bwh, ph, dh, btype) do { read_pr_branch ((code), (qp)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+#define ia64_br_wexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 2)
+#define ia64_br_wtop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b1 ((code), (qp), (disp), (bwh), (ph), (dh), 3)
+
+/* B2: counted-loop branches; cannot be predicated (qp must be 0). */
+#define ia64_b2(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); check_imm21 ((imm)); check_assert ((qp) == 0); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (4), 37); } while (0)
+
+#define ia64_br_cloop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 5)
+#define ia64_br_cexit_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 6)
+#define ia64_br_ctop_hint_pred(code, qp, disp, bwh, ph, dh) ia64_b2 ((code), (qp), (disp), (bwh), (ph), (dh), 7)
+
+/* B3: br.call -- closes the current bundle after the call is emitted. */
+#define ia64_b3(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); check_imm21 ((imm)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (imm) & 0xfffff, 13, (bwh), 33, (dh), 35, sign_bit ((imm)), 36, (5), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_b3 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+/* B4: indirect branch through a branch register. */
+#define ia64_b4(code, qp, b2, bwh, ph, dh, x6, btype) do { read_pr ((code), (qp)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_B, (qp), 0, (btype), 6, (ph), 12, (b2), 13, (x6), 27, (bwh), 33, (dh), 35, (0), 37); } while (0)
+
+#define ia64_br_cond_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 0)
+#define ia64_br_ia_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x20, 1)
+#define ia64_br_ret_reg_hint_pred(code, qp, b1, bwh, ph, dh) ia64_b4 ((code), (qp), (b1), (bwh), (ph), (dh), 0x21, 4)
+
+/* B5: indirect call; note the wh field here is encoded as bwh*2+1 at bit 32. */
+#define ia64_b5(code, qp, b1, b2, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); read_br_branch ((code), (b2)); check_bwh ((bwh)); check_ph ((ph)); check_dh ((dh)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_B, (qp), 0, (b1), 6, (ph), 12, (b2), 13, ((bwh) * 2) + 1, 32, (dh), 35, (1), 37); ia64_begin_bundle ((code)); } while (0)
+
+#define ia64_br_call_reg_hint_pred(code, qp, b1, b2, bwh, ph, dh) ia64_b5 ((code), (qp), (b1), (b2), (bwh), (ph), (dh))
+
+/* Pseudo ops */
+
+/* Convenience wrappers with all hints defaulted to 0 (sptk/few/none). */
+#define ia64_br_cond_pred(code, qp, disp) ia64_br_cond_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wexit_pred(code, qp, disp) ia64_br_wexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_wtop_pred(code, qp, disp) ia64_br_wtop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_cloop_pred(code, qp, disp) ia64_br_cloop_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_cexit_pred(code, qp, disp) ia64_br_cexit_hint_pred (code, qp, disp, 0, 0, 0)
+#define ia64_br_ctop_pred(code, qp, disp) ia64_br_ctop_hint_pred (code, qp, disp, 0, 0, 0)
+
+#define ia64_br_call_pred(code, qp, b1, disp) ia64_br_call_hint_pred (code, qp, b1, disp, 0, 0, 0)
+
+#define ia64_br_cond_reg_pred(code, qp, b1) ia64_br_cond_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ia_reg_pred(code, qp, b1) ia64_br_ia_reg_hint_pred (code, qp, b1, 0, 0, 0)
+#define ia64_br_ret_reg_pred(code, qp, b1) ia64_br_ret_reg_hint_pred (code, qp, b1, 0, 0, 0)
+
+#define ia64_br_call_reg_pred(code, qp, b1, b2) ia64_br_call_reg_hint_pred (code, qp, b1, b2, 0, 0, 0)
+
+/* End of pseudo ops */
+
+/* IP-relative predict whether-hint (for brp-style ops; currently unused here). */
+typedef enum {
+	IA64_IPWH_SPTK = 0,
+	IA64_IPWH_LOOP = 1,
+	IA64_IPWH_DPTK = 2,
+	IA64_IPWH_EXIT = 3
+} Ia64IPRelativeBranchWhetherHint;
+
+/* B6 and B7 is missing */
+
+/* B8: operand-less branch-unit ops selected by x6. */
+#define ia64_b8(code, qp, x6) do { read_pr ((code), (qp)); ia64_emit_ins_3 ((code), IA64_INS_TYPE_B, (qp), 0, (x6), 27, (0), 37); } while (0)
+
+#define ia64_cover_pred(code, qp) ia64_b8 ((code), (qp), 0x02)
+#define ia64_clrrrb_pred(code, qp) ia64_b8 ((code), (qp), 0x04)
+#define ia64_clrrrb_pr_pred(code, qp) ia64_b8 ((code), (qp), 0x05)
+#define ia64_rfi_pred(code, qp) ia64_b8 ((code), (qp), 0x08)
+#define ia64_bsw_0_pred(code, qp) ia64_b8 ((code), (qp), 0x0C)
+#define ia64_bsw_1_pred(code, qp) ia64_b8 ((code), (qp), 0x0D)
+#define ia64_epc_pred(code, qp) ia64_b8 ((code), (qp), 0x10)
+
+/* B9: break/nop/hint with a 21-bit immediate (top bit at 36). */
+#define ia64_b9(code, qp, imm, opcode, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_B, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 0, 0x00)
+#define ia64_nop_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x00)
+#define ia64_hint_b_pred(code, qp, imm) ia64_b9 ((code), (qp), (imm), 2, 0x01)
+
+/*
+ * F-Unit Instructions
+ */
+
+/* F1: fused multiply-add family (f1 = f3 * f4 + f2), with status field sf. */
+#define ia64_f1(code, qp, f1, f3, f4, f2, sf, opcode, x) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); read_fr ((code), (f4)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (sf), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 0)
+#define ia64_fma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 8, 1)
+#define ia64_fma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 0)
+#define ia64_fpma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 9, 1)
+#define ia64_fms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 0)
+#define ia64_fms_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xA, 1)
+#define ia64_fms_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 0)
+#define ia64_fpms_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xB, 1)
+#define ia64_fnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 0)
+#define ia64_fnma_s_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xC, 1)
+#define ia64_fnma_d_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 0)
+#define ia64_fpnma_sf_pred(code, qp, f1, f3, f4, f2, sf) ia64_f1 ((code), (qp), (f1), (f3), (f4), (f2), (sf), 0xD, 1)
+
+/* Pseudo ops */
+/* fnorm f1 = f3: expressed as fma f1 = f3 * f1(=1.0) + f0(=0.0). */
+#define ia64_fnorm_s_sf_pred(code, qp, f1, f3, sf) ia64_fma_s_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
+#define ia64_fnorm_d_sf_pred(code, qp, f1, f3, sf) ia64_fma_d_sf_pred ((code), (qp), (f1), (f3), 1, 0, (sf))
+
+/* F2: fixed-point multiply-add (xma). */
+#define ia64_f2(code, qp, f1, f3, f4, f2, opcode, x, x2) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x2), 34, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_xma_l_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 0)
+#define ia64_xma_h_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 3)
+#define ia64_xma_hu_pred(code, qp, f1, f3, f4, f2) ia64_f2 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 1, 2)
+
+/* Pseudo ops */
+/* xmpy = xma with f2 = f0 (addend 0); the low 64 bits are identical for
+   signed and unsigned multiply, hence xmpy.lu maps to xma.l as well. */
+#define ia64_xmpy_l_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_lu_pred(code, qp, f1, f3, f4) ia64_xma_l_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_h_pred(code, qp, f1, f3, f4) ia64_xma_h_pred ((code), (qp), (f1), (f3), (f4), 0)
+#define ia64_xmpy_hu_pred(code, qp, f1, f3, f4) ia64_xma_hu_pred ((code), (qp), (f1), (f3), (f4), 0)
+
+/* F3: parallel select. */
+#define ia64_f3(code, qp, f1, f3, f4, f2, opcode, x) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f3)); read_fr ((code), (f4)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (f4), 27, (x), 36, (opcode), 37); } while (0)
+
+#define ia64_fselect_pred(code, qp, f1, f3, f4, f2) ia64_f3 ((code), (qp), (f1), (f3), (f4), (f2), 0xE, 0)
+
+/* F4: floating-point compare, writing two predicate registers. */
+#define ia64_f4(code, qp, p1, p2, f2, f3, sf, opcode, ra, rb, ta) do { read_pr ((code), (qp)); read_fr ((code), (f2)); read_fr ((code), (f3)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); ia64_emit_ins_10 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (f3), 20, (p2), 27, (ra), 33, (sf), 34, (rb), 36, (opcode), 37); } while (0)
+
+#define ia64_fcmp_eq_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 0)
+#define ia64_fcmp_lt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 0)
+#define ia64_fcmp_le_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 0)
+#define ia64_fcmp_unord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 0)
+#define ia64_fcmp_eq_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 0, 1)
+#define ia64_fcmp_lt_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 0, 1, 1)
+#define ia64_fcmp_le_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 0, 1)
+#define ia64_fcmp_unord_unc_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_f4 ((code), (qp), (p1), (p2), (f2), (f3), (sf), 0x4, 1, 1, 1)
+
+/* Pseudo ops */
+/* Derived comparisons: swap the operands and/or the target predicates. */
+#define ia64_fcmp_gt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p1), (p2), (f3), (f2), (sf))
+#define ia64_fcmp_ne_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nlt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_nle_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+#define ia64_fcmp_ngt_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_nge_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), (qp), (p2), (p1), (f3), (f2), (sf))
+#define ia64_fcmp_ord_sf_pred(code, qp, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), (qp), (p2), (p1), (f2), (f3), (sf))
+
+/* F5: fclass -- the 9-bit fclass value is split into fc2 (low 2 bits, at 33)
+   and fclass7c (upper 7 bits, at 20). */
+#define ia64_f5(code, qp, p1, p2, f2, fclass, opcode, ta) do { read_pr ((code), (qp)); write_pr_fp ((code), (p1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (p1), 6, (ta), 12, (f2), 13, (((guint64)(fclass)) >> 2) & 0x7f, 20, (p2), 27, ((guint64)(fclass)) & 0x3, 33, (opcode), 37); } while (0)
+
+#define ia64_fclass_m_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 0)
+#define ia64_fclass_m_unc_pred(code, qp, p1, p2, f2, fclass) ia64_f5 ((code), (qp), (p1), (p2), (f2), (fclass), 5, 1)
+
+/* F6: reciprocal approximation (frcpa). */
+#define ia64_f6(code, qp, f1, p2, f2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_9 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 0, 1, 0)
+#define ia64_fprcpa_sf_pred(code, qp, f1, p2, f2, f3, sf) ia64_f6 ((code), (qp), (f1), (p2), (f2), (f3), (sf), 1, 1, 0)
+
+/* F7: reciprocal square-root approximation (frsqrta). */
+#define ia64_f7(code, qp, f1, p2, f3, sf, opcode, x, q) do { read_pr ((code), (qp)); write_fr ((code), (f1)); write_pr_fp ((code), (p2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f3), 20, (p2), 27, (x), 33, (sf), 34, (q), 36, (opcode), 37); } while (0)
+
+#define ia64_frsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 0, 1, 1)
+#define ia64_fprsqrta_sf_pred(code, qp, f1, p2, f3, sf) ia64_f7 ((code), (qp), (f1), (p2), (f3), (sf), 1, 1, 1)
+
+/* F8: min/max and parallel compare, result in a float register. */
+#define ia64_f8(code, qp, f1, f2, f3, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_8 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x14)
+#define ia64_fman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x15)
+#define ia64_famin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x16)
+#define ia64_famax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 0, 0, 0x17)
+#define ia64_fpmin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x14)
+#define ia64_fpman_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x15)
+#define ia64_fpamin_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x16)
+#define ia64_fpamax_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x17)
+#define ia64_fpcmp_eq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x30)
+#define ia64_fpcmp_lt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x31)
+#define ia64_fpcmp_le_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x32)
+#define ia64_fpcmp_unord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x33)
+#define ia64_fpcmp_neq_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x34)
+#define ia64_fpcmp_nlt_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x35)
+#define ia64_fpcmp_nle_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x36)
+#define ia64_fpcmp_ord_sf_pred(code, qp, f1, f2, f3, sf) ia64_f8 ((code), (qp), (f1), (f2), (f3), (sf), 1, 0, 0x37)
+
+/* F9: merge/mix/sign-extend/pack/logical ops on float registers. */
+#define ia64_f9(code, qp, f1, f2, f3, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); read_fr ((code), (f3)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (f3), 20, (x6), 27, (x), 33, (opcode), 37); } while (0)
+
+#define ia64_fmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10)
+#define ia64_fmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11)
+#define ia64_fmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12)
+#define ia64_fmix_lr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x39)
+#define ia64_fmix_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3A)
+#define ia64_fmix_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3B)
+#define ia64_fsxt_r_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3C)
+#define ia64_fsxt_l_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x3D)
+#define ia64_fpack_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x28)
+#define ia64_fswap_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x34)
+#define ia64_fswap_nl_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x35)
+#define ia64_fswap_nr_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x36)
+#define ia64_fand_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2C)
+#define ia64_fandcm_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2D)
+#define ia64_for_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2E)
+#define ia64_fxor_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x2F)
+#define ia64_fpmerge_s_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x10)
+#define ia64_fpmerge_ns_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x11)
+#define ia64_fpmerge_se_pred(code, qp, f1, f2, f3) ia64_f9 ((code), (qp), (f1), (f2), (f3), 0, 0, 0x12)
+
+/* Pseudo ops */
+/* fmov f1 = f3 is fmerge.s with both source operands equal. */
+#define ia64_fmov_pred(code, qp, f1, f3) ia64_fmerge_s_pred ((code), (qp), (f1), (f3), (f3))
+
+/* F10: float -> fixed conversions (with sf). */
+#define ia64_f10(code, qp, f1, f2, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_sf ((sf)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x18)
+#define ia64_fcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x19)
+#define ia64_fcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1A)
+#define ia64_fcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 0, 0, 0x1B)
+#define ia64_fpcvt_fx_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x18)
+#define ia64_fpcvt_fxu_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x19)
+#define ia64_fpcvt_fx_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1A)
+#define ia64_fpcvt_fxu_trunc_sf_pred(code, qp, f1, f2, sf) ia64_f10 ((code), (qp), (f1), (f2), (sf), 1, 0, 0x1B)
+
+/* F11: fixed -> float conversion (no sf field). */
+#define ia64_f11(code, qp, f1, f2, opcode, x, x6) do { read_pr ((code), (qp)); write_fr ((code), (f1)); read_fr ((code), (f2)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (f1), 6, (f2), 13, (x6), 27, (x), 34, (opcode), 37); } while (0)
+
+#define ia64_fcvt_xf_pred(code, qp, f1, f2) ia64_f11 ((code), (qp), (f1), (f2), 0, 0, 0x1C)
+
+/* F12: fsetc -- update FPSR control bits via and/or masks. */
+#define ia64_f12(code, qp, amask, omask, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (amask) & 0x3f, 13, (omask) & 0x3f, 20, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fsetc_sf_pred(code, qp, amask, omask, sf) ia64_f12 ((code), (qp), (amask), (omask), (sf), 0, 0, 0x04)
+
+/* F13: fclrf -- clear FPSR status-field flags. */
+#define ia64_f13(code, qp, sf, opcode, x, x6) do { read_pr ((code), (qp)); ia64_emit_ins_5 ((code), IA64_INS_TYPE_F, (qp), 0, (x6), 27, (x), 33, (sf), 34, (opcode), 37); } while (0)
+
+#define ia64_fclrf_sf_pred(code, qp, sf) ia64_f13 ((code), (qp), (sf), 0, 0, 0x05)
+
+/* F14: fchkf -- branch to handler if status-field flags are set. */
+#define ia64_f14(code, qp, imm, sf, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_7 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, (sf), 34, sign_bit ((imm)), 36, (opcode), 37); } while (0)
+
+#define ia64_fchkf_sf_pred(code, qp, disp, sf) ia64_f14 ((code), (qp), (disp), (sf), 0, 0, 0x8)
+
+/* F15: break with a 21-bit immediate. */
+#define ia64_f15(code, qp, imm, opcode, x, x6) do { read_pr ((code), (qp)); check_imm21 ((imm)); ia64_emit_ins_6 ((code), IA64_INS_TYPE_F, (qp), 0, (imm) & 0xfffff, 6, (x6), 27, (x), 33, ((imm) >> 20) & 0x1, 36, (opcode), 37); } while (0)
+
+#define ia64_break_f_pred(code, qp, imm) ia64_f15 ((code), (qp), (imm), 0, 0, 0x0)
+
+/*
+ * X-UNIT ENCODINGS
+ */
+
+/* X1-X5 occupy two slots: the high 41 bits of the long immediate go into
+   the L slot first (ia64_emit_ins_1), then the main instruction with the
+   remaining immediate pieces.  Each starts a fresh bundle so both slots
+   land in the same MLX bundle. */
+#define ia64_x1(code, qp, imm, x3, x6) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_6 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_break_x_pred(code, qp, imm) ia64_x1 ((code), (qp), (imm), 0, 0x00)
+
+/* X2: movl -- 64-bit immediate load; flushes the buffer early if fewer than
+   two instruction slots remain instead of forcing a new bundle. */
+#define ia64_x2(code, qp, r1, imm, vc) do { if (code.nins > IA64_INS_BUFFER_SIZE - 2) ia64_emit_bundle (&(code), FALSE); read_pr ((code), (qp)); write_gr ((code), (r1)); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((gint64)(imm) >> 22) & 0x1ffffffffffULL, 0); ia64_emit_ins_9 ((code), IA64_INS_TYPE_LX, (qp), 0, (r1), 6, (gint64)(imm) & 0x7f, (13), (vc), 20, ((gint64)(imm) >> 21) & 0x1, 21, ((gint64)(imm) >> 16) & 0x1f, 22, ((gint64)(imm) >> 7) & 0x1ff, 27, ((gint64)(imm) >> 63) & 0x1, 36, (6), 37); } while (0)
+
+#define ia64_movl_pred(code, qp, r1, imm) ia64_x2 ((code), (qp), (r1), (imm), 0)
+
+/* X3: brl -- long IP-relative branch. */
+#define ia64_x3(code, qp, imm, bwh, ph, dh, btype) do { read_pr ((code), (qp)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (btype), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xC), 37); } while (0)
+
+#define ia64_brl_cond_hint_pred(code, qp, disp, bwh, ph, dh) ia64_x3 ((code), (qp), (disp), (bwh), (ph), (dh), 0)
+
+/* X4: brl.call -- long IP-relative call writing a branch register. */
+#define ia64_x4(code, qp, b1, imm, bwh, ph, dh) do { read_pr ((code), (qp)); write_br ((code), (b1)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 20) & 0x1ffffffffffULL, 0); ia64_emit_ins_8 ((code), IA64_INS_TYPE_LX, (qp), 0, (b1), 6, (ph), 12, (guint64)(imm) & 0xfffff, (13), (bwh), 33, (dh), 35, ((guint64)(imm) >> 59) & 0x1, 36, (0xD), 37); } while (0)
+
+#define ia64_brl_call_hint_pred(code, qp, b1, disp, bwh, ph, dh) ia64_x4 ((code), (qp), (b1), (disp), (bwh), (ph), (dh))
+
+/* X5: nop.x / hint.x with a 62-bit immediate. */
+#define ia64_x5(code, qp, imm, x3, x6, y) do { read_pr ((code), (qp)); check_imm62 ((imm)); ia64_begin_bundle (code); ia64_emit_ins_1 ((code), IA64_INS_TYPE_LX, ((guint64)(imm) >> 21) & 0x1ffffffffffULL, 0); ia64_emit_ins_7 ((code), IA64_INS_TYPE_LX, (qp), 0, (guint64)(imm) & 0xfffff, (6), (y), 26, (x6), 27, (x3), 33, ((guint64)(imm) >> 20) & 0x1, 36, (0), 37); } while (0)
+
+#define ia64_nop_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 0)
+#define ia64_hint_x_pred(code, qp, imm) ia64_x5 ((code), (qp), (imm), 0, 0x01, 1)
+
+
+
+
+
+
+/*
+ * Non-predicated instruction variants.
+ *
+ * Every macro from here on simply forwards to its *_pred counterpart with
+ * the qualifying predicate fixed to 0 (p0, the always-true predicate on
+ * IA-64), so the instruction executes unconditionally.
+ */
+
+
+/* Integer ALU, register-register forms. */
+#define ia64_add(code, r1, r2, r3) ia64_add_pred ((code), 0, r1, r2, r3)
+#define ia64_add1(code, r1, r2, r3) ia64_add1_pred ((code), 0, r1, r2, r3)
+#define ia64_sub(code, r1, r2, r3) ia64_sub_pred ((code), 0, r1, r2, r3)
+#define ia64_sub1(code, r1, r2, r3) ia64_sub1_pred ((code), 0, r1, r2, r3)
+#define ia64_addp4(code, r1, r2, r3) ia64_addp4_pred ((code), 0, r1, r2, r3)
+#define ia64_and(code, r1, r2, r3) ia64_and_pred ((code), 0, r1, r2, r3)
+#define ia64_andcm(code, r1, r2, r3) ia64_andcm_pred ((code), 0, r1, r2, r3)
+#define ia64_or(code, r1, r2, r3) ia64_or_pred ((code), 0, r1, r2, r3)
+#define ia64_xor(code, r1, r2, r3) ia64_xor_pred ((code), 0, r1, r2, r3)
+
+
+/* Shift-and-add (count gives the left-shift amount applied to r2). */
+#define ia64_shladd(code, r1, r2, r3,count) ia64_shladd_pred ((code), 0, r1, r2, r3,count)
+#define ia64_shladdp4(code, r1, r2, r3,count) ia64_shladdp4_pred ((code), 0, r1, r2, r3,count)
+
+
+/* ALU forms taking an 8-bit immediate first operand. */
+#define ia64_sub_imm(code, r1,imm8,r3) ia64_sub_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_and_imm(code, r1,imm8,r3) ia64_and_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_andcm_imm(code, r1,imm8,r3) ia64_andcm_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_or_imm(code, r1,imm8,r3) ia64_or_imm_pred ((code), 0, r1,imm8,r3)
+#define ia64_xor_imm(code, r1,imm8,r3) ia64_xor_imm_pred ((code), 0, r1,imm8,r3)
+
+
+/* Add forms taking a 14-bit immediate. */
+#define ia64_adds_imm(code, r1,imm14,r3) ia64_adds_imm_pred ((code), 0, r1,imm14,r3)
+#define ia64_addp4_imm(code, r1,imm14,r3) ia64_addp4_imm_pred ((code), 0, r1,imm14,r3)
+
+
+/* Add form taking a 22-bit immediate. */
+#define ia64_addl_imm(code, r1,imm22,r3) ia64_addl_imm_pred ((code), 0, r1,imm22,r3)
+
+
+/*
+ * Compare instructions, register-register forms.  p1/p2 name the two
+ * predicate destinations; r2/r3 are the operands being compared.  The cmp4_*
+ * family presumably operates on 4-byte (32-bit) values -- inferred from the
+ * naming only, confirm against the Itanium ISA.  The _and/_or/_or_andcm
+ * suffixes are the parallel-compare completer variants.
+ */
+#define ia64_cmp_lt(code, p1, p2, r2, r3) ia64_cmp_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu(code, p1, p2, r2, r3) ia64_cmp_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq(code, p1, p2, r2, r3) ia64_cmp_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_unc(code, p1, p2, r2, r3) ia64_cmp_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ltu_unc(code, p1, p2, r2, r3) ia64_cmp_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_unc(code, p1, p2, r2, r3) ia64_cmp_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_and(code, p1, p2, r2, r3) ia64_cmp_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or(code, p1, p2, r2, r3) ia64_cmp_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_and(code, p1, p2, r2, r3) ia64_cmp_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or(code, p1, p2, r2, r3) ia64_cmp_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_lt(code, p1, p2, r2, r3) ia64_cmp4_lt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu(code, p1, p2, r2, r3) ia64_cmp4_ltu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq(code, p1, p2, r2, r3) ia64_cmp4_eq_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_unc(code, p1, p2, r2, r3) ia64_cmp4_lt_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ltu_unc(code, p1, p2, r2, r3) ia64_cmp4_ltu_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_unc(code, p1, p2, r2, r3) ia64_cmp4_eq_unc_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_and(code, p1, p2, r2, r3) ia64_cmp4_eq_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or(code, p1, p2, r2, r3) ia64_cmp4_eq_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_eq_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_eq_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_and(code, p1, p2, r2, r3) ia64_cmp4_ne_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or(code, p1, p2, r2, r3) ia64_cmp4_ne_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ne_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ne_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+/* Pseudo ops: relations the hardware has no direct encoding for; the _pred
+ * macros synthesize them (e.g. by swapping operands or predicates). */
+#define ia64_cmp_ne(code, p1, p2, r2, r3) ia64_cmp_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le(code, p1, p2, r2, r3) ia64_cmp_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt(code, p1, p2, r2, r3) ia64_cmp_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge(code, p1, p2, r2, r3) ia64_cmp_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_leu(code, p1, p2, r2, r3) ia64_cmp_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gtu(code, p1, p2, r2, r3) ia64_cmp_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_geu(code, p1, p2, r2, r3) ia64_cmp_geu_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_ne(code, p1, p2, r2, r3) ia64_cmp4_ne_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le(code, p1, p2, r2, r3) ia64_cmp4_le_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt(code, p1, p2, r2, r3) ia64_cmp4_gt_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge(code, p1, p2, r2, r3) ia64_cmp4_ge_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_leu(code, p1, p2, r2, r3) ia64_cmp4_leu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gtu(code, p1, p2, r2, r3) ia64_cmp4_gtu_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_geu(code, p1, p2, r2, r3) ia64_cmp4_geu_pred ((code), 0, p1, p2, r2, r3)
+
+/* Parallel-compare completers for the ordered relations. */
+#define ia64_cmp_gt_and(code, p1, p2, r2, r3) ia64_cmp_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or(code, p1, p2, r2, r3) ia64_cmp_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_and(code, p1, p2, r2, r3) ia64_cmp_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or(code, p1, p2, r2, r3) ia64_cmp_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_and(code, p1, p2, r2, r3) ia64_cmp_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or(code, p1, p2, r2, r3) ia64_cmp_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_and(code, p1, p2, r2, r3) ia64_cmp_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or(code, p1, p2, r2, r3) ia64_cmp_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+#define ia64_cmp4_gt_and(code, p1, p2, r2, r3) ia64_cmp4_gt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or(code, p1, p2, r2, r3) ia64_cmp4_gt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_gt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_gt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_and(code, p1, p2, r2, r3) ia64_cmp4_le_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or(code, p1, p2, r2, r3) ia64_cmp4_le_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_le_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_le_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_and(code, p1, p2, r2, r3) ia64_cmp4_ge_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or(code, p1, p2, r2, r3) ia64_cmp4_ge_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_ge_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_ge_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_and(code, p1, p2, r2, r3) ia64_cmp4_lt_and_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or(code, p1, p2, r2, r3) ia64_cmp4_lt_or_pred ((code), 0, p1, p2, r2, r3)
+#define ia64_cmp4_lt_or_andcm(code, p1, p2, r2, r3) ia64_cmp4_lt_or_andcm_pred ((code), 0, p1, p2, r2, r3)
+
+
+/* Compare instructions taking an 8-bit immediate (imm8) in place of r2. */
+#define ia64_cmp_lt_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_lt_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_lt_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_lt_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ltu_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_ltu_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_unc_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_unc_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_eq_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_eq_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_and_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_and_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_imm_pred ((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ne_or_andcm_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_or_andcm_imm_pred ((code), 0, p1, p2, imm8, r3)
+
+/* Pseudo ops: immediate-form relations synthesized by the _pred macros. */
+#define ia64_cmp_ne_imm(code, p1, p2, imm8, r3) ia64_cmp_ne_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_le_imm(code, p1, p2, imm8, r3) ia64_cmp_le_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gt_imm(code, p1, p2, imm8, r3) ia64_cmp_gt_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_ge_imm(code, p1, p2, imm8, r3) ia64_cmp_ge_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_leu_imm(code, p1, p2, imm8, r3) ia64_cmp_leu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp_gtu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp_geu_imm(code, p1, p2, imm8, r3) ia64_cmp_geu_imm_pred((code), 0, p1, p2, imm8, r3)
+
+#define ia64_cmp4_ne_imm(code, p1, p2, imm8, r3) ia64_cmp4_ne_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_le_imm(code, p1, p2, imm8, r3) ia64_cmp4_le_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gt_imm(code, p1, p2, imm8, r3) ia64_cmp4_gt_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_ge_imm(code, p1, p2, imm8, r3) ia64_cmp4_ge_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_leu_imm(code, p1, p2, imm8, r3) ia64_cmp4_leu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_gtu_imm(code, p1, p2, imm8, r3) ia64_cmp4_gtu_imm_pred((code), 0, p1, p2, imm8, r3)
+#define ia64_cmp4_geu_imm(code, p1, p2, imm8, r3) ia64_cmp4_geu_imm_pred((code), 0, p1, p2, imm8, r3)
+
+/*
+ * Parallel (multimedia) integer instructions.  The numeric suffix (1/2/4)
+ * gives the element size in bytes; the sss/uuu/uus suffixes are the
+ * signed/unsigned saturation completers -- see the Itanium ISA for their
+ * exact semantics.
+ */
+#define ia64_padd1(code, r1,r2,r3) ia64_padd1_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2(code, r1,r2,r3) ia64_padd2_pred ((code), 0, r1,r2,r3)
+#define ia64_padd4(code, r1,r2,r3) ia64_padd4_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_sss(code, r1,r2,r3) ia64_padd1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_sss(code, r1,r2,r3) ia64_padd2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uuu(code, r1,r2,r3) ia64_padd1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uuu(code, r1,r2,r3) ia64_padd2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_padd1_uus(code, r1,r2,r3) ia64_padd1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_padd2_uus(code, r1,r2,r3) ia64_padd2_uus_pred ((code), 0, r1,r2,r3)
+
+#define ia64_psub1(code, r1,r2,r3) ia64_psub1_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2(code, r1,r2,r3) ia64_psub2_pred ((code), 0, r1,r2,r3)
+#define ia64_psub4(code, r1,r2,r3) ia64_psub4_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_sss(code, r1,r2,r3) ia64_psub1_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_sss(code, r1,r2,r3) ia64_psub2_sss_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uuu(code, r1,r2,r3) ia64_psub1_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uuu(code, r1,r2,r3) ia64_psub2_uuu_pred ((code), 0, r1,r2,r3)
+#define ia64_psub1_uus(code, r1,r2,r3) ia64_psub1_uus_pred ((code), 0, r1,r2,r3)
+#define ia64_psub2_uus(code, r1,r2,r3) ia64_psub2_uus_pred ((code), 0, r1,r2,r3)
+
+/* Parallel average / average-subtract and parallel compares. */
+#define ia64_pavg1(code, r1,r2,r3) ia64_pavg1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2(code, r1,r2,r3) ia64_pavg2_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg1_raz(code, r1,r2,r3) ia64_pavg1_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavg2_raz(code, r1,r2,r3) ia64_pavg2_raz_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub1(code, r1,r2,r3) ia64_pavgsub1_pred ((code), 0, r1,r2,r3)
+#define ia64_pavgsub2(code, r1,r2,r3) ia64_pavgsub2_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_eq(code, r1,r2,r3) ia64_pcmp1_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_eq(code, r1,r2,r3) ia64_pcmp2_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_eq(code, r1,r2,r3) ia64_pcmp4_eq_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp1_gt(code, r1,r2,r3) ia64_pcmp1_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp2_gt(code, r1,r2,r3) ia64_pcmp2_gt_pred ((code), 0, r1,r2,r3)
+#define ia64_pcmp4_gt(code, r1,r2,r3) ia64_pcmp4_gt_pred ((code), 0, r1,r2,r3)
+
+
+/* Parallel shift-and-add and parallel multiply-shift-right. */
+#define ia64_pshladd2(code, r1, r2, r3, count) ia64_pshladd2_pred ((code), 0, r1, r2, r3, count)
+#define ia64_pshradd2(code, r1, r2, r3, count) ia64_pshradd2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2(code, r1, r2, r3, count) ia64_pmpyshr2_pred ((code), 0, r1, r2, r3, count)
+
+#define ia64_pmpyshr2_u(code, r1, r2, r3, count) ia64_pmpyshr2_u_pred ((code), 0, r1, r2, r3, count)
+
+
+/* Parallel multiply, mix, pack/unpack, min/max and sum-of-absolute-differences. */
+#define ia64_pmpy2_r(code, r1, r2, r3) ia64_pmpy2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_pmpy2_l(code, r1, r2, r3) ia64_pmpy2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_r(code, r1, r2, r3) ia64_mix1_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_r(code, r1, r2, r3) ia64_mix2_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_r(code, r1, r2, r3) ia64_mix4_r_pred ((code), 0, r1, r2, r3)
+#define ia64_mix1_l(code, r1, r2, r3) ia64_mix1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix2_l(code, r1, r2, r3) ia64_mix2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_mix4_l(code, r1, r2, r3) ia64_mix4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_uss(code, r1, r2, r3) ia64_pack2_uss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack2_sss(code, r1, r2, r3) ia64_pack2_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_pack4_sss(code, r1, r2, r3) ia64_pack4_sss_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_h(code, r1, r2, r3) ia64_unpack1_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_h(code, r1, r2, r3) ia64_unpack2_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_h(code, r1, r2, r3) ia64_unpack4_h_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack1_l(code, r1, r2, r3) ia64_unpack1_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack2_l(code, r1, r2, r3) ia64_unpack2_l_pred ((code), 0, r1, r2, r3)
+#define ia64_unpack4_l(code, r1, r2, r3) ia64_unpack4_l_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin1_u(code, r1, r2, r3) ia64_pmin1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax1_u(code, r1, r2, r3) ia64_pmax1_u_pred ((code), 0, r1, r2, r3)
+#define ia64_pmin2(code, r1, r2, r3) ia64_pmin2_pred ((code), 0, r1, r2, r3)
+#define ia64_pmax2(code, r1, r2, r3) ia64_pmax2_pred ((code), 0, r1, r2, r3)
+#define ia64_psad1(code, r1, r2, r3) ia64_psad1_pred ((code), 0, r1, r2, r3)
+
+/* Byte/halfword permutations: mbtype/mhtype select the shuffle pattern. */
+#define ia64_mux1(code, r1, r2, mbtype) ia64_mux1_pred ((code), 0, r1, r2, mbtype)
+
+
+#define ia64_mux2(code, r1, r2, mhtype) ia64_mux2_pred ((code), 0, r1, r2, mhtype)
+
+
+/* Variable-count right shifts (count in register r2); _u forms are unsigned. */
+#define ia64_pshr2(code, r1, r3, r2) ia64_pshr2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4(code, r1, r3, r2) ia64_pshr4_pred ((code), 0, r1, r3, r2)
+#define ia64_shr(code, r1, r3, r2) ia64_shr_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr2_u(code, r1, r3, r2) ia64_pshr2_u_pred ((code), 0, r1, r3, r2)
+#define ia64_pshr4_u(code, r1, r3, r2) ia64_pshr4_u_pred ((code), 0, r1, r3, r2)
+#define ia64_shr_u(code, r1, r3, r2) ia64_shr_u_pred ((code), 0, r1, r3, r2)
+
+
+/* Parallel right shifts by an immediate count. */
+#define ia64_pshr2_imm(code, r1, r3, count) ia64_pshr2_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_imm(code, r1, r3, count) ia64_pshr4_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr2_u_imm(code, r1, r3, count) ia64_pshr2_u_imm_pred ((code), 0, r1, r3, count)
+#define ia64_pshr4_u_imm(code, r1, r3, count) ia64_pshr4_u_imm_pred ((code), 0, r1, r3, count)
+
+
+/* Variable-count left shifts (count in register r2). */
+#define ia64_pshl2(code, r1, r3, r2) ia64_pshl2_pred ((code), 0, r1, r3, r2)
+#define ia64_pshl4(code, r1, r3, r2) ia64_pshl4_pred ((code), 0, r1, r3, r2)
+#define ia64_shl(code, r1, r3, r2) ia64_shl_pred ((code), 0, r1, r3, r2)
+
+/*
+ * Immediate shifts are pseudo-ops built on deposit/extract:
+ *   shl   r1 = r3, count  ==>  dep.z  r1 = r3, count, 64 - count
+ *   shr   r1 = r3, count  ==>  extr   r1 = r3, count, 64 - count
+ *   shr.u r1 = r3, count  ==>  extr.u r1 = r3, count, 64 - count
+ * `count' is parenthesized so that an expression argument (e.g. n + 1)
+ * expands correctly inside `64 - (count)'; the unparenthesized form would
+ * compute 64 - n + 1.
+ */
+#define ia64_shl_imm(code, r1, r3, count) ia64_dep_z ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_imm(code, r1, r3, count) ia64_extr ((code), (r1), (r3), (count), 64 - (count))
+#define ia64_shr_u_imm(code, r1, r3, count) ia64_extr_u ((code), (r1), (r3), (count), 64 - (count))
+
+/* Parallel left shifts by an immediate count. */
+#define ia64_pshl2_imm(code, r1, r2, count) ia64_pshl2_imm_pred ((code), 0, r1, r2, count)
+#define ia64_pshl4_imm(code, r1, r2, count) ia64_pshl4_imm_pred ((code), 0, r1, r2, count)
+
+
+/* Population count of r3 into r1. */
+#define ia64_popcnt(code, r1, r3) ia64_popcnt_pred ((code), 0, r1, r3)
+
+
+/* Shift-right-pair: funnel shift of the r2:r3 pair by `count' bits. */
+#define ia64_shrp(code, r1, r2, r3, count) ia64_shrp_pred ((code), 0, r1, r2, r3, count)
+
+
+/* Bit-field extract: pull `len' bits starting at `pos' from r3 into r1
+ * (extr sign-extends, extr.u zero-extends). */
+#define ia64_extr_u(code, r1, r3, pos, len) ia64_extr_u_pred ((code), 0, r1, r3, pos, len)
+#define ia64_extr(code, r1, r3, pos, len) ia64_extr_pred ((code), 0, r1, r3, pos, len)
+
+
+/* Bit-field deposit; the _z forms deposit into a zeroed destination. */
+#define ia64_dep_z(code, r1, r2, pos, len) ia64_dep_z_pred ((code), 0, r1, r2, pos, len)
+
+
+#define ia64_dep_z_imm(code, r1, imm, pos, len) ia64_dep_z_imm_pred ((code), 0, r1, imm, pos, len)
+
+
+#define ia64_dep_imm(code, r1, imm, r3, pos, len) ia64_dep_imm_pred ((code), 0, r1, imm, r3, pos, len)
+
+
+#define ia64_dep(code, r1, r2, r3, pos, len) ia64_dep_pred ((code), 0, r1, r2, r3, pos, len)
+
+
+/* Test bit `pos' of r3, writing the predicate pair p1/p2; _z tests for
+ * zero, _nz for non-zero; _and/_or/_or_andcm are the parallel completers. */
+#define ia64_tbit_z(code, p1, p2, r3, pos) ia64_tbit_z_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_unc(code, p1, p2, r3, pos) ia64_tbit_z_unc_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_and(code, p1, p2, r3, pos) ia64_tbit_z_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_and(code, p1, p2, r3, pos) ia64_tbit_nz_and_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or(code, p1, p2, r3, pos) ia64_tbit_z_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or(code, p1, p2, r3, pos) ia64_tbit_nz_or_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_z_or_andcm(code, p1, p2, r3, pos) ia64_tbit_z_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+#define ia64_tbit_nz_or_andcm(code, p1, p2, r3, pos) ia64_tbit_nz_or_andcm_pred ((code), 0, p1, p2, r3, pos)
+
+
+/* Test the NaT bit of r3 (same completer scheme as tbit). */
+#define ia64_tnat_z(code, p1, p2, r3) ia64_tnat_z_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_unc(code, p1, p2, r3) ia64_tnat_z_unc_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_and(code, p1, p2, r3) ia64_tnat_z_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_and(code, p1, p2, r3) ia64_tnat_nz_and_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or(code, p1, p2, r3) ia64_tnat_z_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or(code, p1, p2, r3) ia64_tnat_nz_or_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_z_or_andcm(code, p1, p2, r3) ia64_tnat_z_or_andcm_pred ((code), 0, p1, p2, r3)
+#define ia64_tnat_nz_or_andcm(code, p1, p2, r3) ia64_tnat_nz_or_andcm_pred ((code), 0, p1, p2, r3)
+
+/* I-unit nop/hint/break with a 21-bit immediate payload. */
+#define ia64_nop_i(code, imm) ia64_nop_i_pred ((code), 0, imm)
+#define ia64_hint_i(code, imm) ia64_hint_i_pred ((code), 0, imm)
+
+
+#define ia64_break_i(code, imm) ia64_break_i_pred ((code), 0, imm)
+
+
+/* Speculation check: branch to `disp' if r2 carries a deferred exception. */
+#define ia64_chk_s_i(code, r2,disp) ia64_chk_s_i_pred ((code), 0, r2,disp)
+
+/* Move a GR into a branch register, with prediction hints (wh/ih) and
+ * a presaged-branch displacement; the _ret form marks a return branch. */
+#define ia64_mov_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+#define ia64_mov_ret_to_br_hint(code, b1, r2, disp, wh, ih) ia64_mov_ret_to_br_hint_pred ((code), 0, b1, r2, disp, wh, ih)
+
+/* Pseudo ops */
+
+#define ia64_mov_to_br(code, b1, r2) ia64_mov_to_br_pred ((code), 0, (b1), (r2))
+#define ia64_mov_ret_to_br(code, b1, r2) ia64_mov_ret_to_br_pred ((code), 0, (b1), (r2))
+
+/* End of pseudo ops */
+
+#define ia64_mov_from_br(code, r1, b2) ia64_mov_from_br_pred ((code), 0, r1, b2)
+
+
+/* Predicate-register file transfers (mask selects which predicates move). */
+#define ia64_mov_to_pred(code, r2, mask) ia64_mov_to_pred_pred ((code), 0, r2, mask)
+
+
+#define ia64_mov_to_pred_rot_imm(code, imm) ia64_mov_to_pred_rot_imm_pred ((code), 0, imm)
+
+
+#define ia64_mov_from_ip(code, r1) ia64_mov_from_ip_pred ((code), 0, r1)
+#define ia64_mov_from_pred(code, r1) ia64_mov_from_pred_pred ((code), 0, r1)
+
+
+/* Application-register transfers via the I-unit. */
+#define ia64_mov_to_ar_i(code, ar3, r2) ia64_mov_to_ar_i_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_i(code, ar3, imm) ia64_mov_to_ar_imm_i_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_i(code, r1, ar3) ia64_mov_from_ar_i_pred ((code), 0, r1, ar3)
+
+
+/* Zero/sign extension (zxt/sxt) and compute-zero-index (czx), by element
+ * size in bytes. */
+#define ia64_zxt1(code, r1, r3) ia64_zxt1_pred ((code), 0, r1, r3)
+#define ia64_zxt2(code, r1, r3) ia64_zxt2_pred ((code), 0, r1, r3)
+#define ia64_zxt4(code, r1, r3) ia64_zxt4_pred ((code), 0, r1, r3)
+#define ia64_sxt1(code, r1, r3) ia64_sxt1_pred ((code), 0, r1, r3)
+#define ia64_sxt2(code, r1, r3) ia64_sxt2_pred ((code), 0, r1, r3)
+#define ia64_sxt4(code, r1, r3) ia64_sxt4_pred ((code), 0, r1, r3)
+#define ia64_czx1_l(code, r1, r3) ia64_czx1_l_pred ((code), 0, r1, r3)
+#define ia64_czx2_l(code, r1, r3) ia64_czx2_l_pred ((code), 0, r1, r3)
+#define ia64_czx1_r(code, r1, r3) ia64_czx1_r_pred ((code), 0, r1, r3)
+#define ia64_czx2_r(code, r1, r3) ia64_czx2_r_pred ((code), 0, r1, r3)
+
+/*
+ * Loads (no base-register update): r1 = [r3], with a locality `hint'.
+ * The suffix selects the load completer -- per the Itanium ISA: s =
+ * speculative, a = advanced, sa = speculative-advanced, bias, acq =
+ * acquire ordering, fill = register fill, c_clr/c_nc = check loads,
+ * c_clr_acq = check load with acquire.  Numeric suffix is the access
+ * size in bytes.
+ */
+#define ia64_ld1_hint(code, r1, r3, hint) ia64_ld1_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_hint(code, r1, r3, hint) ia64_ld2_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_hint(code, r1, r3, hint) ia64_ld4_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_hint(code, r1, r3, hint) ia64_ld8_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_s_hint(code, r1, r3, hint) ia64_ld1_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_s_hint(code, r1, r3, hint) ia64_ld2_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_s_hint(code, r1, r3, hint) ia64_ld4_s_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_s_hint(code, r1, r3, hint) ia64_ld8_s_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_a_hint(code, r1, r3, hint) ia64_ld1_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_a_hint(code, r1, r3, hint) ia64_ld2_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_a_hint(code, r1, r3, hint) ia64_ld4_a_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_a_hint(code, r1, r3, hint) ia64_ld8_a_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_sa_hint(code, r1, r3, hint) ia64_ld1_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_sa_hint(code, r1, r3, hint) ia64_ld2_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_sa_hint(code, r1, r3, hint) ia64_ld4_sa_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_sa_hint(code, r1, r3, hint) ia64_ld8_sa_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_bias_hint(code, r1, r3, hint) ia64_ld1_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_bias_hint(code, r1, r3, hint) ia64_ld2_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_bias_hint(code, r1, r3, hint) ia64_ld4_bias_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_bias_hint(code, r1, r3, hint) ia64_ld8_bias_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_acq_hint(code, r1, r3, hint) ia64_ld1_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_acq_hint(code, r1, r3, hint) ia64_ld2_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_acq_hint(code, r1, r3, hint) ia64_ld4_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_acq_hint(code, r1, r3, hint) ia64_ld8_acq_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld8_fill_hint(code, r1, r3, hint) ia64_ld8_fill_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_hint(code, r1, r3, hint) ia64_ld1_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_hint(code, r1, r3, hint) ia64_ld2_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_hint(code, r1, r3, hint) ia64_ld4_c_clr_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_hint(code, r1, r3, hint) ia64_ld8_c_clr_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_nc_hint(code, r1, r3, hint) ia64_ld1_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_nc_hint(code, r1, r3, hint) ia64_ld2_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_nc_hint(code, r1, r3, hint) ia64_ld4_c_nc_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_nc_hint(code, r1, r3, hint) ia64_ld8_c_nc_hint_pred ((code), 0, r1, r3, hint)
+
+#define ia64_ld1_c_clr_acq_hint(code, r1, r3, hint) ia64_ld1_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld2_c_clr_acq_hint(code, r1, r3, hint) ia64_ld2_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld4_c_clr_acq_hint(code, r1, r3, hint) ia64_ld4_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld8_c_clr_acq_hint(code, r1, r3, hint) ia64_ld8_c_clr_acq_hint_pred ((code), 0, r1, r3, hint)
+
+/* 16-byte (paired) loads. */
+#define ia64_ld16_hint(code, r1, r3, hint) ia64_ld16_hint_pred ((code), 0, r1, r3, hint)
+#define ia64_ld16_acq_hint(code, r1, r3, hint) ia64_ld16_acq_hint_pred ((code), 0, r1, r3, hint)
+
+/*
+ * Loads with register post-increment: r1 = [r3], then r3 += r2.
+ * Same completer suffixes as the non-updating forms above.
+ */
+#define ia64_ld1_inc_hint(code, r1, r2, r3, hint) ia64_ld1_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_inc_hint(code, r1, r2, r3, hint) ia64_ld2_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_inc_hint(code, r1, r2, r3, hint) ia64_ld4_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_inc_hint(code, r1, r2, r3, hint) ia64_ld8_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_s_inc_hint(code, r1, r2, r3, hint) ia64_ld1_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_s_inc_hint(code, r1, r2, r3, hint) ia64_ld2_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_s_inc_hint(code, r1, r2, r3, hint) ia64_ld4_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_s_inc_hint(code, r1, r2, r3, hint) ia64_ld8_s_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_a_inc_hint(code, r1, r2, r3, hint) ia64_ld1_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_a_inc_hint(code, r1, r2, r3, hint) ia64_ld2_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_a_inc_hint(code, r1, r2, r3, hint) ia64_ld4_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_a_inc_hint(code, r1, r2, r3, hint) ia64_ld8_a_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld1_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld2_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld4_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_sa_inc_hint(code, r1, r2, r3, hint) ia64_ld8_sa_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld1_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld2_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld4_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_bias_inc_hint(code, r1, r2, r3, hint) ia64_ld8_bias_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld8_fill_inc_hint(code, r1, r2, r3, hint) ia64_ld8_fill_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_nc_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_nc_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+#define ia64_ld1_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld1_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld2_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld2_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld4_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld4_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+#define ia64_ld8_c_clr_acq_inc_hint(code, r1, r2, r3, hint) ia64_ld8_c_clr_acq_inc_hint_pred ((code), 0, r1, r2, r3, hint)
+
+/*
+ * Loads with immediate post-increment: r1 = [r3], then r3 += imm.
+ * Same completer suffixes as above.  (This group continues past the end
+ * of this chunk.)
+ */
+#define ia64_ld1_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_s_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_s_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_a_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_a_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_sa_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_sa_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_bias_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_bias_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld8_fill_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_fill_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_nc_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_nc_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+#define ia64_ld1_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld1_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld2_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld2_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld4_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld4_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+#define ia64_ld8_c_clr_acq_inc_imm_hint(code, r1, r3, imm, hint) ia64_ld8_c_clr_acq_inc_imm_hint_pred ((code), 0, r1, r3, imm, hint)
+
+/* Pseudo ops */
+
+/* Convenience pseudo-ops for the integer loads: each forwards to the
+   corresponding _hint_pred encoder with the qualifying predicate defaulted
+   to 0 (p0, always true) and the locality hint defaulted to 0 ("none").
+   Three addressing families follow: plain [r3], register post-increment
+   ([r3], r2) and immediate post-increment ([r3], imm). */
+
+#define ia64_ld1(code, r1, r3) ia64_ld1_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2(code, r1, r3) ia64_ld2_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4(code, r1, r3) ia64_ld4_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8(code, r1, r3) ia64_ld8_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_s(code, r1, r3) ia64_ld1_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_s(code, r1, r3) ia64_ld2_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_s(code, r1, r3) ia64_ld4_s_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_s(code, r1, r3) ia64_ld8_s_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_a(code, r1, r3) ia64_ld1_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_a(code, r1, r3) ia64_ld2_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_a(code, r1, r3) ia64_ld4_a_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_a(code, r1, r3) ia64_ld8_a_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_sa(code, r1, r3) ia64_ld1_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_sa(code, r1, r3) ia64_ld2_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_sa(code, r1, r3) ia64_ld4_sa_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_sa(code, r1, r3) ia64_ld8_sa_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_bias(code, r1, r3) ia64_ld1_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_bias(code, r1, r3) ia64_ld2_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_bias(code, r1, r3) ia64_ld4_bias_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_bias(code, r1, r3) ia64_ld8_bias_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_acq(code, r1, r3) ia64_ld1_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_acq(code, r1, r3) ia64_ld2_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_acq(code, r1, r3) ia64_ld4_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_acq(code, r1, r3) ia64_ld8_acq_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld8_fill(code, r1, r3) ia64_ld8_fill_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr(code, r1, r3) ia64_ld1_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr(code, r1, r3) ia64_ld2_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr(code, r1, r3) ia64_ld4_c_clr_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr(code, r1, r3) ia64_ld8_c_clr_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_nc(code, r1, r3) ia64_ld1_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_nc(code, r1, r3) ia64_ld2_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_nc(code, r1, r3) ia64_ld4_c_nc_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_nc(code, r1, r3) ia64_ld8_c_nc_hint_pred (code, 0, r1, r3, 0)
+
+#define ia64_ld1_c_clr_acq(code, r1, r3) ia64_ld1_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld2_c_clr_acq(code, r1, r3) ia64_ld2_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld4_c_clr_acq(code, r1, r3) ia64_ld4_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld8_c_clr_acq(code, r1, r3) ia64_ld8_c_clr_acq_hint_pred (code, 0, r1, r3, 0)
+
+/* 16-byte loads (plain and acquire) */
+#define ia64_ld16(code, r1, r3) ia64_ld16_hint_pred (code, 0, r1, r3, 0)
+#define ia64_ld16_acq(code, r1, r3) ia64_ld16_acq_hint_pred (code, 0, r1, r3, 0)
+
+/* Register post-increment family: r3 is advanced by r2 after the access */
+#define ia64_ld1_inc(code, r1, r2, r3) ia64_ld1_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_inc(code, r1, r2, r3) ia64_ld2_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_inc(code, r1, r2, r3) ia64_ld4_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_inc(code, r1, r2, r3) ia64_ld8_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_s_inc(code, r1, r2, r3) ia64_ld1_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_s_inc(code, r1, r2, r3) ia64_ld2_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_s_inc(code, r1, r2, r3) ia64_ld4_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_s_inc(code, r1, r2, r3) ia64_ld8_s_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_a_inc(code, r1, r2, r3) ia64_ld1_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_a_inc(code, r1, r2, r3) ia64_ld2_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_a_inc(code, r1, r2, r3) ia64_ld4_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_a_inc(code, r1, r2, r3) ia64_ld8_a_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_sa_inc(code, r1, r2, r3) ia64_ld1_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_sa_inc(code, r1, r2, r3) ia64_ld2_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_sa_inc(code, r1, r2, r3) ia64_ld4_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_sa_inc(code, r1, r2, r3) ia64_ld8_sa_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_bias_inc(code, r1, r2, r3) ia64_ld1_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_bias_inc(code, r1, r2, r3) ia64_ld2_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_bias_inc(code, r1, r2, r3) ia64_ld4_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_bias_inc(code, r1, r2, r3) ia64_ld8_bias_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_acq_inc(code, r1, r2, r3) ia64_ld1_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_acq_inc(code, r1, r2, r3) ia64_ld2_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_acq_inc(code, r1, r2, r3) ia64_ld4_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_acq_inc(code, r1, r2, r3) ia64_ld8_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld8_fill_inc(code, r1, r2, r3) ia64_ld8_fill_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_inc(code, r1, r2, r3) ia64_ld1_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_inc(code, r1, r2, r3) ia64_ld2_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_inc(code, r1, r2, r3) ia64_ld4_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_inc(code, r1, r2, r3) ia64_ld8_c_clr_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_nc_inc(code, r1, r2, r3) ia64_ld1_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_nc_inc(code, r1, r2, r3) ia64_ld2_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_nc_inc(code, r1, r2, r3) ia64_ld4_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_nc_inc(code, r1, r2, r3) ia64_ld8_c_nc_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+#define ia64_ld1_c_clr_acq_inc(code, r1, r2, r3) ia64_ld1_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld2_c_clr_acq_inc(code, r1, r2, r3) ia64_ld2_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld4_c_clr_acq_inc(code, r1, r2, r3) ia64_ld4_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+#define ia64_ld8_c_clr_acq_inc(code, r1, r2, r3) ia64_ld8_c_clr_acq_inc_hint_pred (code, 0, r1, r2, r3, 0)
+
+/* Immediate post-increment family: r3 is advanced by imm after the access */
+#define ia64_ld1_inc_imm(code, r1, r3, imm) ia64_ld1_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_inc_imm(code, r1, r3, imm) ia64_ld2_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_inc_imm(code, r1, r3, imm) ia64_ld4_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_inc_imm(code, r1, r3, imm) ia64_ld8_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_s_inc_imm(code, r1, r3, imm) ia64_ld1_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_s_inc_imm(code, r1, r3, imm) ia64_ld2_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_s_inc_imm(code, r1, r3, imm) ia64_ld4_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_s_inc_imm(code, r1, r3, imm) ia64_ld8_s_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_a_inc_imm(code, r1, r3, imm) ia64_ld1_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_a_inc_imm(code, r1, r3, imm) ia64_ld2_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_a_inc_imm(code, r1, r3, imm) ia64_ld4_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_a_inc_imm(code, r1, r3, imm) ia64_ld8_a_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_sa_inc_imm(code, r1, r3, imm) ia64_ld1_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_sa_inc_imm(code, r1, r3, imm) ia64_ld2_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_sa_inc_imm(code, r1, r3, imm) ia64_ld4_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_sa_inc_imm(code, r1, r3, imm) ia64_ld8_sa_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_bias_inc_imm(code, r1, r3, imm) ia64_ld1_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_bias_inc_imm(code, r1, r3, imm) ia64_ld2_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_bias_inc_imm(code, r1, r3, imm) ia64_ld4_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_bias_inc_imm(code, r1, r3, imm) ia64_ld8_bias_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_acq_inc_imm(code, r1, r3, imm) ia64_ld1_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_acq_inc_imm(code, r1, r3, imm) ia64_ld2_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_acq_inc_imm(code, r1, r3, imm) ia64_ld4_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_acq_inc_imm(code, r1, r3, imm) ia64_ld8_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld8_fill_inc_imm(code, r1, r3, imm) ia64_ld8_fill_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_nc_inc_imm(code, r1, r3, imm) ia64_ld1_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_nc_inc_imm(code, r1, r3, imm) ia64_ld2_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_nc_inc_imm(code, r1, r3, imm) ia64_ld4_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_nc_inc_imm(code, r1, r3, imm) ia64_ld8_c_nc_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+#define ia64_ld1_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld1_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld2_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld2_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld4_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld4_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+#define ia64_ld8_c_clr_acq_inc_imm(code, r1, r3, imm) ia64_ld8_c_clr_acq_inc_imm_hint_pred (code, 0, r1, r3, imm, 0)
+
+/* End of pseudo ops */
+
+/* Non-predicated wrappers for the integer store encoders ("stN [r3] = r2"),
+   fixing the qualifying predicate to 0 (p0, always true).  .rel is the
+   release-ordered form, st8.spill stores a register together with its NaT
+   bit, and the _inc_imm variants post-increment r3 by an immediate. */
+#define ia64_st1_hint(code, r3, r2, hint) ia64_st1_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_hint(code, r3, r2, hint) ia64_st2_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_hint(code, r3, r2, hint) ia64_st4_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_hint(code, r3, r2, hint) ia64_st8_hint_pred ((code), 0, r3, r2, hint)
+
+/* Pseudo ops */
+/* st8 with the hint additionally defaulted to 0 ("none") */
+#define ia64_st8(code, r3, r2) ia64_st8_hint ((code), (r3), (r2), 0)
+
+#define ia64_st1_rel_hint(code, r3, r2, hint) ia64_st1_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st2_rel_hint(code, r3, r2, hint) ia64_st2_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st4_rel_hint(code, r3, r2, hint) ia64_st4_rel_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st8_rel_hint(code, r3, r2, hint) ia64_st8_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st8_spill_hint(code, r3, r2, hint) ia64_st8_spill_hint_pred ((code), 0, r3, r2, hint)
+
+/* 16-byte stores (plain and release) */
+#define ia64_st16_hint(code, r3, r2, hint) ia64_st16_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_st16_rel_hint(code, r3, r2, hint) ia64_st16_rel_hint_pred ((code), 0, r3, r2, hint)
+
+#define ia64_st1_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st1_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st1_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st2_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st2_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st4_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st4_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+#define ia64_st8_rel_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_rel_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+#define ia64_st8_spill_inc_imm_hint(code, r3, r2, imm, hint) ia64_st8_spill_inc_imm_hint_pred ((code), 0, r3, r2, imm, hint)
+
+
+/* Non-predicated wrappers for the floating-point load encoders
+   ("ldfs/ldfd/ldf8/ldfe f1 = [r3]"): single, double, 8-byte integer and
+   extended forms.  Completers (.s/.a/.sa/.c.clr/.c.nc) mirror the integer
+   loads above; ldf.fill reloads a spilled FP register.  Predicate fixed
+   to 0 (p0, always true). */
+#define ia64_ldfs_hint(code, f1, r3, hint) ia64_ldfs_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_hint(code, f1, r3, hint) ia64_ldfd_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_hint(code, f1, r3, hint) ia64_ldf8_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_hint(code, f1, r3, hint) ia64_ldfe_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_s_hint(code, f1, r3, hint) ia64_ldfs_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_s_hint(code, f1, r3, hint) ia64_ldfd_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_s_hint(code, f1, r3, hint) ia64_ldf8_s_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_s_hint(code, f1, r3, hint) ia64_ldfe_s_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_a_hint(code, f1, r3, hint) ia64_ldfs_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_a_hint(code, f1, r3, hint) ia64_ldfd_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_a_hint(code, f1, r3, hint) ia64_ldf8_a_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_a_hint(code, f1, r3, hint) ia64_ldfe_a_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_sa_hint(code, f1, r3, hint) ia64_ldfs_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_sa_hint(code, f1, r3, hint) ia64_ldfd_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_sa_hint(code, f1, r3, hint) ia64_ldf8_sa_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_sa_hint(code, f1, r3, hint) ia64_ldfe_sa_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_clr_hint(code, f1, r3, hint) ia64_ldfs_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_clr_hint(code, f1, r3, hint) ia64_ldfd_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_clr_hint(code, f1, r3, hint) ia64_ldf8_c_clr_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_clr_hint(code, f1, r3, hint) ia64_ldfe_c_clr_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldfs_c_nc_hint(code, f1, r3, hint) ia64_ldfs_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfd_c_nc_hint(code, f1, r3, hint) ia64_ldfd_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldf8_c_nc_hint(code, f1, r3, hint) ia64_ldf8_c_nc_hint_pred ((code), 0, f1, r3, hint)
+#define ia64_ldfe_c_nc_hint(code, f1, r3, hint) ia64_ldfe_c_nc_hint_pred ((code), 0, f1, r3, hint)
+
+#define ia64_ldf_fill_hint(code, f1, r3, hint) ia64_ldf_fill_hint_pred ((code), 0, f1, r3, hint)
+
+
+/* Non-predicated FP loads with register post-increment
+   ("ldfX f1 = [r3], r2"); predicate fixed to 0 (p0, always true). */
+#define ia64_ldfs_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_s_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_s_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_s_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_a_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_a_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_a_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_sa_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_sa_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_clr_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_clr_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldfs_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfs_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfd_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfd_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldf8_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldf8_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+#define ia64_ldfe_c_nc_inc_hint(code, f1, r3, r2, hint) ia64_ldfe_c_nc_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+#define ia64_ldf_fill_inc_hint(code, f1, r3, r2, hint) ia64_ldf_fill_inc_hint_pred ((code), 0, f1, r3, r2, hint)
+
+
+/* Non-predicated FP loads with immediate post-increment
+   ("ldfX f1 = [r3], imm"); predicate fixed to 0 (p0, always true). */
+#define ia64_ldfs_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_s_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_s_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_a_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_a_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_sa_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_sa_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_clr_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_clr_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldfs_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfs_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfd_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfd_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldf8_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf8_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+#define ia64_ldfe_c_nc_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldfe_c_nc_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+#define ia64_ldf_fill_inc_imm_hint(code, f1, r3, imm, hint) ia64_ldf_fill_inc_imm_hint_pred ((code), 0, f1, r3, imm, hint)
+
+/* Pseudo ops */
+
+/* Convenience pseudo-ops for the FP loads: the qualifying predicate is
+   defaulted to 0 (p0, always true).  Note these forward to the hint-less
+   _pred encoders (no hint argument), unlike the integer pseudo-ops above. */
+
+#define ia64_ldfs(code, f1, r3) ia64_ldfs_pred (code, 0, f1, r3)
+#define ia64_ldfd(code, f1, r3) ia64_ldfd_pred (code, 0, f1, r3)
+#define ia64_ldf8(code, f1, r3) ia64_ldf8_pred (code, 0, f1, r3)
+#define ia64_ldfe(code, f1, r3) ia64_ldfe_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_s(code, f1, r3) ia64_ldfs_s_pred (code, 0, f1, r3)
+#define ia64_ldfd_s(code, f1, r3) ia64_ldfd_s_pred (code, 0, f1, r3)
+#define ia64_ldf8_s(code, f1, r3) ia64_ldf8_s_pred (code, 0, f1, r3)
+#define ia64_ldfe_s(code, f1, r3) ia64_ldfe_s_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_a(code, f1, r3) ia64_ldfs_a_pred (code, 0, f1, r3)
+#define ia64_ldfd_a(code, f1, r3) ia64_ldfd_a_pred (code, 0, f1, r3)
+#define ia64_ldf8_a(code, f1, r3) ia64_ldf8_a_pred (code, 0, f1, r3)
+#define ia64_ldfe_a(code, f1, r3) ia64_ldfe_a_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_sa(code, f1, r3) ia64_ldfs_sa_pred (code, 0, f1, r3)
+#define ia64_ldfd_sa(code, f1, r3) ia64_ldfd_sa_pred (code, 0, f1, r3)
+#define ia64_ldf8_sa(code, f1, r3) ia64_ldf8_sa_pred (code, 0, f1, r3)
+#define ia64_ldfe_sa(code, f1, r3) ia64_ldfe_sa_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_clr(code, f1, r3) ia64_ldfs_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_clr(code, f1, r3) ia64_ldfd_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_clr(code, f1, r3) ia64_ldf8_c_clr_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_clr(code, f1, r3) ia64_ldfe_c_clr_pred (code, 0, f1, r3)
+
+#define ia64_ldfs_c_nc(code, f1, r3) ia64_ldfs_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfd_c_nc(code, f1, r3) ia64_ldfd_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldf8_c_nc(code, f1, r3) ia64_ldf8_c_nc_pred (code, 0, f1, r3)
+#define ia64_ldfe_c_nc(code, f1, r3) ia64_ldfe_c_nc_pred (code, 0, f1, r3)
+
+#define ia64_ldf_fill(code, f1, r3) ia64_ldf_fill_pred (code, 0, f1, r3)
+
+/* Register post-increment family ([r3], r2) */
+#define ia64_ldfs_inc(code, f1, r3, r2) ia64_ldfs_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_inc(code, f1, r3, r2) ia64_ldfd_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_inc(code, f1, r3, r2) ia64_ldf8_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_inc(code, f1, r3, r2) ia64_ldfe_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_s_inc(code, f1, r3, r2) ia64_ldfs_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_s_inc(code, f1, r3, r2) ia64_ldfd_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_s_inc(code, f1, r3, r2) ia64_ldf8_s_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_s_inc(code, f1, r3, r2) ia64_ldfe_s_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_a_inc(code, f1, r3, r2) ia64_ldfs_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_a_inc(code, f1, r3, r2) ia64_ldfd_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_a_inc(code, f1, r3, r2) ia64_ldf8_a_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_a_inc(code, f1, r3, r2) ia64_ldfe_a_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_sa_inc(code, f1, r3, r2) ia64_ldfs_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_sa_inc(code, f1, r3, r2) ia64_ldfd_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_sa_inc(code, f1, r3, r2) ia64_ldf8_sa_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_sa_inc(code, f1, r3, r2) ia64_ldfe_sa_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_clr_inc(code, f1, r3, r2) ia64_ldfs_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_clr_inc(code, f1, r3, r2) ia64_ldfd_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_clr_inc(code, f1, r3, r2) ia64_ldf8_c_clr_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_clr_inc(code, f1, r3, r2) ia64_ldfe_c_clr_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldfs_c_nc_inc(code, f1, r3, r2) ia64_ldfs_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfd_c_nc_inc(code, f1, r3, r2) ia64_ldfd_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldf8_c_nc_inc(code, f1, r3, r2) ia64_ldf8_c_nc_inc_pred (code, 0, f1, r3, r2)
+#define ia64_ldfe_c_nc_inc(code, f1, r3, r2) ia64_ldfe_c_nc_inc_pred (code, 0, f1, r3, r2)
+
+#define ia64_ldf_fill_inc(code, f1, r3, r2) ia64_ldf_fill_inc_pred (code, 0, f1, r3, r2)
+
+/* Immediate post-increment family ([r3], imm) */
+#define ia64_ldfs_inc_imm(code, f1, r3, imm) ia64_ldfs_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_inc_imm(code, f1, r3, imm) ia64_ldfd_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_inc_imm(code, f1, r3, imm) ia64_ldf8_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_inc_imm(code, f1, r3, imm) ia64_ldfe_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_s_inc_imm(code, f1, r3, imm) ia64_ldfs_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_s_inc_imm(code, f1, r3, imm) ia64_ldfd_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_s_inc_imm(code, f1, r3, imm) ia64_ldf8_s_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_s_inc_imm(code, f1, r3, imm) ia64_ldfe_s_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_a_inc_imm(code, f1, r3, imm) ia64_ldfs_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_a_inc_imm(code, f1, r3, imm) ia64_ldfd_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_a_inc_imm(code, f1, r3, imm) ia64_ldf8_a_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_a_inc_imm(code, f1, r3, imm) ia64_ldfe_a_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_sa_inc_imm(code, f1, r3, imm) ia64_ldfs_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_sa_inc_imm(code, f1, r3, imm) ia64_ldfd_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_sa_inc_imm(code, f1, r3, imm) ia64_ldf8_sa_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_sa_inc_imm(code, f1, r3, imm) ia64_ldfe_sa_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfs_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfd_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_clr_inc_imm(code, f1, r3, imm) ia64_ldf8_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_clr_inc_imm(code, f1, r3, imm) ia64_ldfe_c_clr_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldfs_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfs_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfd_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfd_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldf8_c_nc_inc_imm(code, f1, r3, imm) ia64_ldf8_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+#define ia64_ldfe_c_nc_inc_imm(code, f1, r3, imm) ia64_ldfe_c_nc_inc_imm_pred (code, 0, f1, r3, imm)
+
+#define ia64_ldf_fill_inc_imm(code, f1, r3, imm) ia64_ldf_fill_inc_imm_pred (code, 0, f1, r3, imm)
+
+/* End of pseudo ops */
+
+/* Non-predicated wrappers for the FP store encoders ("stfX [r3] = f2");
+   stf.spill stores an FP register in spill format.  The _inc_imm variants
+   post-increment r3 by an immediate.  Predicate fixed to 0 (p0, always
+   true). */
+#define ia64_stfs_hint(code, r3, f2, hint) ia64_stfs_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfd_hint(code, r3, f2, hint) ia64_stfd_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf8_hint(code, r3, f2, hint) ia64_stf8_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stfe_hint(code, r3, f2, hint) ia64_stfe_hint_pred ((code), 0, r3, f2, hint)
+#define ia64_stf_spill_hint(code, r3, f2, hint) ia64_stf_spill_hint_pred ((code), 0, r3, f2, hint)
+
+
+#define ia64_stfs_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfs_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfd_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfd_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf8_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf8_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stfe_inc_imm_hint(code, r3, f2, imm, hint) ia64_stfe_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+#define ia64_stf_spill_inc_imm_hint(code, r3, f2, imm, hint) ia64_stf_spill_inc_imm_hint_pred ((code), 0, r3, f2, imm, hint)
+
+
+/* Non-predicated wrappers for the FP pair-load encoders
+   ("ldfps/ldfpd/ldfp8 f1, f2 = [r3]"), which load two FP registers from
+   adjacent memory; completers mirror the single-register loads.
+   Predicate fixed to 0 (p0, always true). */
+#define ia64_ldfps_hint(code, f1, f2, r3, hint) ia64_ldfps_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_hint(code, f1, f2, r3, hint) ia64_ldfpd_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_hint(code, f1, f2, r3, hint) ia64_ldfp8_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_hint(code, f1, f2, r3, hint) ia64_ldfps_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_hint(code, f1, f2, r3, hint) ia64_ldfps_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+
+#define ia64_ldfps_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_s_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_s_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_a_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_a_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_sa_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_sa_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_clr_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_clr_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_ldfps_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfps_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfpd_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfpd_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+#define ia64_ldfp8_c_nc_inc_hint(code, f1, f2, r3, hint) ia64_ldfp8_c_nc_inc_hint_pred ((code), 0, f1, f2, r3, hint)
+
+#define ia64_lfetch_hint(code, r3, hint) ia64_lfetch_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_excl_hint(code, r3, hint) ia64_lfetch_excl_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_hint(code, r3, hint) ia64_lfetch_fault_hint_pred ((code), 0, r3, hint)
+#define ia64_lfetch_fault_excl_hint(code, r3, hint) ia64_lfetch_fault_excl_hint_pred ((code), 0, r3, hint)
+
+
+#define ia64_lfetch_inc_hint(code, r3, r2, hint) ia64_lfetch_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_inc_hint_pred ((code), 0, r3, r2, hint)
+#define ia64_lfetch_fault_excl_inc_hint(code, r3, r2, hint) ia64_lfetch_fault_excl_inc_hint_pred ((code), 0, r3, r2, hint)
+
+
+#define ia64_lfetch_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+#define ia64_lfetch_fault_excl_inc_imm_hint(code, r3, imm, hint) ia64_lfetch_fault_excl_inc_imm_hint_pred ((code), 0, r3, imm, hint)
+
+
+#define ia64_cmpxchg1_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg1_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg1_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg2_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg2_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg4_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg4_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg8_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg8_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_acq_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_acq_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_cmpxchg16_rel_hint(code, r1, r3, r2, hint) ia64_cmpxchg16_rel_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg1_hint(code, r1, r3, r2, hint) ia64_xchg1_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg2_hint(code, r1, r3, r2, hint) ia64_xchg2_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg4_hint(code, r1, r3, r2, hint) ia64_xchg4_hint_pred ((code), 0, r1, r3, r2, hint)
+#define ia64_xchg8_hint(code, r1, r3, r2, hint) ia64_xchg8_hint_pred ((code), 0, r1, r3, r2, hint)
+
+#define ia64_fetchadd4_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd4_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_acq_hint(code, r1, r3, inc, hint) ia64_fetchadd8_acq_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd4_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd4_rel_hint_pred ((code), 0, r1, r3, inc, hint)
+#define ia64_fetchadd8_rel_hint(code, r1, r3, inc, hint) ia64_fetchadd8_rel_hint_pred ((code), 0, r1, r3, inc, hint)
+
+
+#define ia64_setf_sig(code, f1, r2) ia64_setf_sig_pred ((code), 0, f1, r2)
+#define ia64_setf_exp(code, f1, r2) ia64_setf_exp_pred ((code), 0, f1, r2)
+#define ia64_setf_s(code, f1, r2) ia64_setf_s_pred ((code), 0, f1, r2)
+#define ia64_setf_d(code, f1, r2) ia64_setf_d_pred ((code), 0, f1, r2)
+
+
+#define ia64_getf_sig(code, r1, f2) ia64_getf_sig_pred ((code), 0, r1, f2)
+#define ia64_getf_exp(code, r1, f2) ia64_getf_exp_pred ((code), 0, r1, f2)
+#define ia64_getf_s(code, r1, f2) ia64_getf_s_pred ((code), 0, r1, f2)
+#define ia64_getf_d(code, r1, f2) ia64_getf_d_pred ((code), 0, r1, f2)
+
+
+#define ia64_chk_s_m(code, r2,disp) ia64_chk_s_m_pred ((code), 0, r2,disp)
+
+
+#define ia64_chk_s_float_m(code, f2,disp) ia64_chk_s_float_m_pred ((code), 0, f2,disp)
+
+
+#define ia64_chk_a_nc(code, r1,disp) ia64_chk_a_nc_pred ((code), 0, r1,disp)
+#define ia64_chk_a_clr(code, r1,disp) ia64_chk_a_clr_pred ((code), 0, r1,disp)
+
+
+#define ia64_chk_a_nc_float(code, f1,disp) ia64_chk_a_nc_float_pred ((code), 0, f1,disp)
+#define ia64_chk_a_clr_float(code, f1,disp) ia64_chk_a_clr_float_pred ((code), 0, f1,disp)
+
+
+#define ia64_invala(code) ia64_invala_pred ((code), 0)
+#define ia64_fwb(code) ia64_fwb_pred ((code), 0)
+#define ia64_mf(code) ia64_mf_pred ((code), 0)
+#define ia64_mf_a(code) ia64_mf_a_pred ((code), 0)
+#define ia64_srlz_d(code) ia64_srlz_d_pred ((code), 0)
+#define ia64_stlz_i(code) ia64_stlz_i_pred ((code), 0)
+#define ia64_sync_i(code) ia64_sync_i_pred ((code), 0)
+
+
+#define ia64_flushrs(code) ia64_flushrs_pred ((code), 0)
+#define ia64_loadrs(code) ia64_loadrs_pred ((code), 0)
+
+#define ia64_invala_e(code, r1) ia64_invala_e_pred ((code), 0, r1)
+
+
+#define ia64_invala_e_float(code, f1) ia64_invala_e_float_pred ((code), 0, f1)
+
+
+#define ia64_fc(code, r3) ia64_fc_pred ((code), 0, r3)
+#define ia64_fc_i(code, r3) ia64_fc_i_pred ((code), 0, r3)
+
+
+#define ia64_mov_to_ar_m(code, ar3, r2) ia64_mov_to_ar_m_pred ((code), 0, ar3, r2)
+
+
+#define ia64_mov_to_ar_imm_m(code, ar3, imm) ia64_mov_to_ar_imm_m_pred ((code), 0, ar3, imm)
+
+
+#define ia64_mov_from_ar_m(code, r1, ar3) ia64_mov_from_ar_m_pred ((code), 0, r1, ar3)
+
+#define ia64_mov_to_cr(code, cr3, r2) ia64_mov_to_cr_pred ((code), 0, cr3, r2)
+
+
+#define ia64_mov_from_cr(code, r1, cr3) ia64_mov_from_cr_pred ((code), 0, r1, cr3)
+
+
+#define ia64_alloc(code, r1, i, l, o, r) ia64_alloc_pred ((code), 0, r1, i, l, o, r)
+
+
+#define ia64_mov_to_psr_l(code, r2) ia64_mov_to_psr_l_pred ((code), 0, r2)
+#define ia64_mov_to_psr_um(code, r2) ia64_mov_to_psr_um_pred ((code), 0, r2)
+
+
+#define ia64_mov_from_psr(code, r1) ia64_mov_from_psr_pred ((code), 0, r1)
+#define ia64_mov_from_psr_um(code, r1) ia64_mov_from_psr_um_pred ((code), 0, r1)
+
+
+#define ia64_break_m(code, imm) ia64_break_m_pred ((code), 0, imm)
+
+/* The System/Memory Management instruction encodings (M38-M47) */
+
+
+#define ia64_nop_m(code, imm) ia64_nop_m_pred ((code), 0, imm)
+#define ia64_hint_m(code, imm) ia64_hint_m_pred ((code), 0, imm)
+
+#define ia64_br_cond_hint(code, disp, bwh, ph, dh) ia64_br_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wexit_hint(code, disp, bwh, ph, dh) ia64_br_wexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_wtop_hint(code, disp, bwh, ph, dh) ia64_br_wtop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_br_cloop_hint(code, disp, bwh, ph, dh) ia64_br_cloop_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_cexit_hint(code, disp, bwh, ph, dh) ia64_br_cexit_hint_pred ((code), 0, disp, bwh, ph, dh)
+#define ia64_br_ctop_hint(code, disp, bwh, ph, dh) ia64_br_ctop_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+#define ia64_br_call_hint(code, b1, disp, bwh, ph, dh) ia64_br_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+#define ia64_br_cond_reg_hint(code, b1, bwh, ph, dh) ia64_br_cond_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ia_reg_hint(code, b1, bwh, ph, dh) ia64_br_ia_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+#define ia64_br_ret_reg_hint(code, b1, bwh, ph, dh) ia64_br_ret_reg_hint_pred ((code), 0, b1, bwh, ph, dh)
+
+#define ia64_br_call_reg_hint(code, b1, b2, bwh, ph, dh) ia64_br_call_reg_hint_pred ((code), 0, b1, b2, bwh, ph, dh)
+
+/* Pseudo ops */
+
+#define ia64_br_cond(code, disp) ia64_br_cond_pred (code, 0, disp)
+#define ia64_br_wexit(code, disp) ia64_br_wexit_pred (code, 0, disp)
+#define ia64_br_wtop(code, disp) ia64_br_wtop_pred (code, 0, disp)
+
+#define ia64_br_cloop(code, disp) ia64_br_cloop_pred (code, 0, disp)
+#define ia64_br_cexit(code, disp) ia64_br_cexit_pred (code, 0, disp)
+#define ia64_br_ctop(code, disp) ia64_br_ctop_pred (code, 0, disp)
+
+#define ia64_br_call(code, b1, disp) ia64_br_call_pred (code, 0, b1, disp)
+
+#define ia64_br_cond_reg(code, b1) ia64_br_cond_reg_pred (code, 0, b1)
+#define ia64_br_ia_reg(code, b1) ia64_br_ia_reg_pred (code, 0, b1)
+#define ia64_br_ret_reg(code, b1) ia64_br_ret_reg_pred (code, 0, b1)
+
+#define ia64_br_call_reg(code, b1, b2) ia64_br_call_reg_pred (code, 0, b1, b2)
+
+/* End of pseudo ops */
+
+#define ia64_cover(code) ia64_cover_pred ((code), 0)
+#define ia64_clrrrb(code) ia64_clrrrb_pred ((code), 0)
+#define ia64_clrrrb_pr(code) ia64_clrrrb_pr_pred ((code), 0)
+#define ia64_rfi(code) ia64_rfi_pred ((code), 0)
+#define ia64_bsw_0(code) ia64_bsw_0_pred ((code), 0)
+#define ia64_bsw_1(code) ia64_bsw_1_pred ((code), 0)
+#define ia64_epc(code) ia64_epc_pred ((code), 0)
+
+
+#define ia64_break_b(code, imm) ia64_break_b_pred ((code), 0, imm)
+#define ia64_nop_b(code, imm) ia64_nop_b_pred ((code), 0, imm)
+#define ia64_hint_b(code, imm) ia64_hint_b_pred ((code), 0, imm)
+
+
+#define ia64_break_x(code, imm) ia64_break_x_pred ((code), 0, imm)
+
+
+#define ia64_movl(code, r1, imm) ia64_movl_pred ((code), 0, (r1), (imm))
+
+
+#define ia64_brl_cond_hint(code, disp, bwh, ph, dh) ia64_brl_cond_hint_pred ((code), 0, disp, bwh, ph, dh)
+
+
+#define ia64_brl_call_hint(code, b1, disp, bwh, ph, dh) ia64_brl_call_hint_pred ((code), 0, b1, disp, bwh, ph, dh)
+
+
+#define ia64_nop_x(code, imm) ia64_nop_x_pred ((code), 0, imm)
+#define ia64_hint_x(code, imm) ia64_hint_x_pred ((code), 0, imm)
+
+/*
+ * Pseudo-ops
+ */
+
+#define ia64_mov_pred(code, qp, r1, r3) ia64_adds_imm_pred ((code), (qp), (r1), 0, (r3))
+#define ia64_mov(code, r1, r3) ia64_mov_pred ((code), 0, (r1), (r3))
+
+/*
+ * FLOATING POINT
+ */
+
+#define ia64_fma_sf(code, f1, f3, f4, f2, sf) ia64_fma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_s_sf(code, f1, f3, f4, f2, sf) ia64_fma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fma_d_sf(code, f1, f3, f4, f2, sf) ia64_fma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpma_sf(code, f1, f3, f4, f2, sf) ia64_fpma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_sf(code, f1, f3, f4, f2, sf) ia64_fms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_s_sf(code, f1, f3, f4, f2, sf) ia64_fms_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fms_d_sf(code, f1, f3, f4, f2, sf) ia64_fms_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpms_sf(code, f1, f3, f4, f2, sf) ia64_fpms_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_sf(code, f1, f3, f4, f2, sf) ia64_fnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_s_sf(code, f1, f3, f4, f2, sf) ia64_fnma_s_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fnma_d_sf(code, f1, f3, f4, f2, sf) ia64_fnma_d_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+#define ia64_fpnma_sf(code, f1, f3, f4, f2, sf) ia64_fpnma_sf_pred ((code), 0, f1, f3, f4, f2, sf)
+
+/* Pseudo ops */
+#define ia64_fnorm_s_sf(code, f1, f3, sf) ia64_fnorm_s_sf_pred ((code), 0, (f1), (f3), (sf))
+#define ia64_fnorm_d_sf(code, f1, f3, sf) ia64_fnorm_d_sf_pred ((code), 0, (f1), (f3), (sf))
+
+#define ia64_xma_l(code, f1, f3, f4, f2) ia64_xma_l_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_h(code, f1, f3, f4, f2) ia64_xma_h_pred ((code), 0, f1, f3, f4, f2)
+#define ia64_xma_hu(code, f1, f3, f4, f2) ia64_xma_hu_pred ((code), 0, f1, f3, f4, f2)
+
+/* Pseudo ops */
+#define ia64_xmpy_l(code, f1, f3, f4) ia64_xmpy_l_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_lu(code, f1, f3, f4) ia64_xmpy_lu_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_h(code, f1, f3, f4) ia64_xmpy_h_pred ((code), 0, (f1), (f3), (f4))
+#define ia64_xmpy_hu(code, f1, f3, f4) ia64_xmpy_hu_pred ((code), 0, (f1), (f3), (f4))
+
+#define ia64_fselect(code, f1, f3, f4, f2) ia64_fselect_pred ((code), 0, f1, f3, f4, f2)
+
+#define ia64_fcmp_eq_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_eq_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_eq_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_lt_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_lt_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_le_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_le_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_unord_unc_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_unord_unc_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+/* Pseudo ops */
+#define ia64_fcmp_gt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_gt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ne_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ne_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nlt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nlt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nle_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nle_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ngt_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ngt_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_nge_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_nge_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+#define ia64_fcmp_ord_sf(code, p1, p2, f2, f3, sf) ia64_fcmp_ord_sf_pred ((code), 0, p1, p2, f2, f3, sf)
+
+#define ia64_fclass_m(code, p1, p2, f2, fclass) ia64_fclass_m_pred ((code), 0, p1, p2, f2, fclass)
+#define ia64_fclass_m_unc(code, p1, p2, f2, fclass) ia64_fclass_m_unc_pred ((code), 0, p1, p2, f2, fclass)
+
+#define ia64_frcpa_sf(code, f1, p2, f2, f3, sf) ia64_frcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+#define ia64_fprcpa_sf(code, f1, p2, f2, f3, sf) ia64_fprcpa_sf_pred ((code), 0, f1, p2, f2, f3, sf)
+
+#define ia64_frsqrta_sf(code, f1, p2, f3, sf) ia64_frsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+#define ia64_fprsqrta_sf(code, f1, p2, f3, sf) ia64_fprsqrta_sf_pred ((code), 0, f1, p2, f3, sf)
+
+#define ia64_fmin_sf(code, f1, f2, f3, sf) ia64_fmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fman_sf(code, f1, f2, f3, sf) ia64_fman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famin_sf(code, f1, f2, f3, sf) ia64_famin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_famax_sf(code, f1, f2, f3, sf) ia64_famax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpmin_sf(code, f1, f2, f3, sf) ia64_fpmin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpman_sf(code, f1, f2, f3, sf) ia64_fpman_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamin_sf(code, f1, f2, f3, sf) ia64_fpamin_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpamax_sf(code, f1, f2, f3, sf) ia64_fpamax_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_eq_sf(code, f1, f2, f3, sf) ia64_fpcmp_eq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_lt_sf(code, f1, f2, f3, sf) ia64_fpcmp_lt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_le_sf(code, f1, f2, f3, sf) ia64_fpcmp_le_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_unord_sf(code, f1, f2, f3, sf) ia64_fpcmp_unord_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_neq_sf(code, f1, f2, f3, sf) ia64_fpcmp_neq_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nlt_sf(code, f1, f2, f3, sf) ia64_fpcmp_nlt_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_nle_sf(code, f1, f2, f3, sf) ia64_fpcmp_nle_sf_pred ((code), 0, f1, f2, f3, sf)
+#define ia64_fpcmp_ord_sf(code, f1, f2, f3, sf) ia64_fpcmp_ord_sf_pred ((code), 0, f1, f2, f3, sf)
+
+#define ia64_fmerge_s(code, f1, f2, f3) ia64_fmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_ns(code, f1, f2, f3) ia64_fmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fmerge_se(code, f1, f2, f3) ia64_fmerge_se_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_lr(code, f1, f2, f3) ia64_fmix_lr_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_r(code, f1, f2, f3) ia64_fmix_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fmix_l(code, f1, f2, f3) ia64_fmix_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_r(code, f1, f2, f3) ia64_fsxt_r_pred ((code), 0, f1, f2, f3)
+#define ia64_fsxt_l(code, f1, f2, f3) ia64_fsxt_l_pred ((code), 0, f1, f2, f3)
+#define ia64_fpack(code, f1, f2, f3) ia64_fpack_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap(code, f1, f2, f3) ia64_fswap_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nl(code, f1, f2, f3) ia64_fswap_nl_pred ((code), 0, f1, f2, f3)
+#define ia64_fswap_nr(code, f1, f2, f3) ia64_fswap_nr_pred ((code), 0, f1, f2, f3)
+#define ia64_fand(code, f1, f2, f3) ia64_fand_pred ((code), 0, f1, f2, f3)
+#define ia64_fandcm(code, f1, f2, f3) ia64_fandcm_pred ((code), 0, f1, f2, f3)
+#define ia64_for(code, f1, f2, f3) ia64_for_pred ((code), 0, f1, f2, f3)
+#define ia64_fxor(code, f1, f2, f3) ia64_fxor_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_s(code, f1, f2, f3) ia64_fpmerge_s_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_ns(code, f1, f2, f3) ia64_fpmerge_ns_pred ((code), 0, f1, f2, f3)
+#define ia64_fpmerge_se(code, f1, f2, f3) ia64_fpmerge_se_pred ((code), 0, f1, f2, f3)
+
+/* Pseudo ops */
+#define ia64_fmov(code, f1, f3) ia64_fmov_pred ((code), 0, (f1), (f3))
+
+#define ia64_fcvt_fx_sf(code, f1, f2, sf) ia64_fcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_sf(code, f1, f2, sf) ia64_fcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_sf(code, f1, f2, sf) ia64_fpcvt_fx_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_sf(code, f1, f2, sf) ia64_fpcvt_fxu_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fx_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fx_trunc_sf_pred ((code), 0, f1, f2, sf)
+#define ia64_fpcvt_fxu_trunc_sf(code, f1, f2, sf) ia64_fpcvt_fxu_trunc_sf_pred ((code), 0, f1, f2, sf)
+
+#define ia64_fcvt_xf(code, f1, f2) ia64_fcvt_xf_pred ((code), 0, f1, f2)
+
+#define ia64_fsetc_sf(code, amask, omask, sf) ia64_fsetc_sf_pred ((code), 0, amask, omask, sf)
+
+#define ia64_fclrf_sf(code, sf) ia64_fclrf_sf_pred ((code), 0, sf)
+
+#define ia64_fchkf_sf(code, disp, sf) ia64_fchkf_sf_pred ((code), 0, disp, sf)
+
+#define ia64_break_f(code, imm) ia64_break_f_pred ((code), 0, imm)
+
+
+#endif
diff --git a/src/arch/mips/.gitignore b/src/arch/mips/.gitignore
new file mode 100644
index 0000000..13efac7
--- /dev/null
+++ b/src/arch/mips/.gitignore
@@ -0,0 +1,6 @@
+/
+/Makefile
+/Makefile.in
+/*.o
+/*.lo
+/.deps
diff --git a/src/arch/mips/Makefile.am b/src/arch/mips/Makefile.am
new file mode 100644
index 0000000..1063365
--- /dev/null
+++ b/src/arch/mips/Makefile.am
@@ -0,0 +1,8 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-mips.la
+
+libmonoarch_mips_la_SOURCES = mips-codegen.h
+
+noinst_PROGRAMS = test
diff --git a/src/arch/mips/mips-codegen.h b/src/arch/mips/mips-codegen.h
new file mode 100644
index 0000000..1dbd1c6
--- /dev/null
+++ b/src/arch/mips/mips-codegen.h
@@ -0,0 +1,435 @@
+#ifndef __MIPS_CODEGEN_H__
+#define __MIPS_CODEGEN_H__
+/*
+ * Copyright (c) 2004 Novell, Inc
+ * Author: Paolo Molaro (lupus@ximian.com)
+ *
+ */
+
+/* registers */
+enum {
+ mips_zero,
+ mips_at, /* assembler temp */
+ mips_v0, /* return values */
+ mips_v1,
+ mips_a0, /* 4 - func arguments */
+ mips_a1,
+ mips_a2,
+ mips_a3,
+#if _MIPS_SIM == _ABIO32
+ mips_t0, /* 8 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+ mips_t4,
+ mips_t5,
+ mips_t6,
+ mips_t7,
+#elif _MIPS_SIM == _ABIN32
+ mips_a4, /* 4 more argument registers */
+ mips_a5,
+ mips_a6,
+ mips_a7,
+ mips_t0, /* 4 temporaries */
+ mips_t1,
+ mips_t2,
+ mips_t3,
+#endif
+ mips_s0, /* 16 callee saved */
+ mips_s1,
+ mips_s2,
+ mips_s3,
+ mips_s4,
+ mips_s5,
+ mips_s6,
+ mips_s7,
+ mips_t8, /* 24 temps */
+ mips_t9, /* 25 temp / pic call-through register */
+ mips_k0, /* 26 kernel-reserved */
+ mips_k1,
+ mips_gp, /* 28 */
+ mips_sp, /* stack pointer */
+ mips_fp, /* frame pointer */
+ mips_ra /* return address */
+};
+
+/* we treat the register file as containing just doubles... */
+enum {
+ mips_f0, /* return regs */
+ mips_f1,
+ mips_f2,
+ mips_f3,
+ mips_f4, /* temps */
+ mips_f5,
+ mips_f6,
+ mips_f7,
+ mips_f8,
+ mips_f9,
+ mips_f10,
+ mips_f11,
+ mips_f12, /* first arg */
+ mips_f13,
+ mips_f14, /* second arg */
+ mips_f15,
+ mips_f16, /* temps */
+ mips_f17,
+ mips_f18,
+ mips_f19,
+ mips_f20, /* callee saved */
+ mips_f21,
+ mips_f22,
+ mips_f23,
+ mips_f24,
+ mips_f25,
+ mips_f26,
+ mips_f27,
+ mips_f28,
+ mips_f29,
+ mips_f30,
+ mips_f31
+};
+
+/* prefetch hints */
+enum {
+ MIPS_FOR_LOAD,
+ MIPS_FOR_STORE,
+ MIPS_FOR_LOAD_STREAMED = 4,
+ MIPS_FOR_STORE_STREAMED,
+ MIPS_FOR_LOAD_RETAINED,
+ MIPS_FOR_STORE_RETAINED
+};
+
+/* coprocessors */
+enum {
+ MIPS_COP0,
+ MIPS_COP1,
+ MIPS_COP2,
+ MIPS_COP3
+};
+
+enum {
+ MIPS_FMT_SINGLE = 16,
+ MIPS_FMT_DOUBLE = 17,
+ MIPS_FMT_WORD = 20,
+ MIPS_FMT_LONG = 21,
+ MIPS_FMT3_SINGLE = 0,
+ MIPS_FMT3_DOUBLE = 1
+};
+
+/* fpu rounding mode */
+enum {
+ MIPS_ROUND_TO_NEAREST,
+ MIPS_ROUND_TO_ZERO,
+ MIPS_ROUND_TO_POSINF,
+ MIPS_ROUND_TO_NEGINF,
+ MIPS_ROUND_MASK = 3
+};
+
+/* fpu enable/cause flags, cc */
+enum {
+ MIPS_FPU_C_MASK = 1 << 23,
+ MIPS_INEXACT = 1,
+ MIPS_UNDERFLOW = 2,
+ MIPS_OVERFLOW = 4,
+ MIPS_DIVZERO = 8,
+ MIPS_INVALID = 16,
+ MIPS_NOTIMPL = 32,
+ MIPS_FPU_FLAGS_OFFSET = 2,
+ MIPS_FPU_ENABLES_OFFSET = 7,
+ MIPS_FPU_CAUSES_OFFSET = 12
+};
+
+/* fpu condition values - see manual entry for C.cond.fmt instructions */
+enum {
+ MIPS_FPU_F,
+ MIPS_FPU_UN,
+ MIPS_FPU_EQ,
+ MIPS_FPU_UEQ,
+ MIPS_FPU_OLT,
+ MIPS_FPU_ULT,
+ MIPS_FPU_OLE,
+ MIPS_FPU_ULE,
+ MIPS_FPU_SF,
+ MIPS_FPU_NGLE,
+ MIPS_FPU_SEQ,
+ MIPS_FPU_NGL,
+ MIPS_FPU_LT,
+ MIPS_FPU_NGE,
+ MIPS_FPU_LE,
+ MIPS_FPU_NGT
+};
+
+#if SIZEOF_REGISTER == 4
+
+#define MIPS_SW mips_sw
+#define MIPS_LW mips_lw
+#define MIPS_ADDU mips_addu
+#define MIPS_ADDIU mips_addiu
+#define MIPS_SWC1 mips_swc1
+#define MIPS_LWC1 mips_lwc1
+#define MIPS_MOVE mips_move
+
+#elif SIZEOF_REGISTER == 8
+
+#define MIPS_SW mips_sd
+#define MIPS_LW mips_ld
+#define MIPS_ADDU mips_daddu
+#define MIPS_ADDIU mips_daddiu
+#define MIPS_SWC1 mips_sdc1
+#define MIPS_LWC1 mips_ldc1
+#define MIPS_MOVE mips_dmove
+
+#else
+#error Unknown SIZEOF_REGISTER
+#endif
+
+#define mips_emit32(c,x) do { \
+ *((guint32 *) (void *)(c)) = x; \
+ (c) = (typeof(c))(((guint32 *)(void *)(c)) + 1); \
+ } while (0)
+
+#define mips_format_i(code,op,rs,rt,imm) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((imm)&0xffff)))
+#define mips_format_j(code,op,imm) mips_emit32 ((code), (((op)<<26)|((imm)&0x03ffffff)))
+#define mips_format_r(code,op,rs,rt,rd,sa,func) mips_emit32 ((code), (((op)<<26)|((rs)<<21)|((rt)<<16)|((rd)<<11)|((sa)<<6)|(func)))
+#define mips_format_divmul(code,op,src1,src2,fun) mips_emit32 ((code), (((op)<<26)|((src1)<<21)|((src2)<<16)|(fun)))
+
+#define mips_is_imm16(val) ((gint)(gshort)(gint)(val) == (gint)(val))
+
+/* Load always using lui/addiu pair (for later patching) */
+#define mips_load(c,D,v) do { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } while (0)
+
+/* load constant - no patch-up */
+#define mips_load_const(c,D,v) do { \
+ if (!mips_is_imm16 ((v))) { \
+ if (((guint32)(v)) & (1 << 15)) { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)+1); \
+ } \
+ else { \
+ mips_lui ((c), (D), mips_zero, (((guint32)(v))>>16)); \
+ } \
+ if (((guint32)(v)) & 0xffff) \
+ mips_addiu ((c), (D), (D), ((guint32)(v)) & 0xffff); \
+ } \
+ else \
+ mips_addiu ((c), (D), mips_zero, ((guint32)(v)) & 0xffff); \
+ } while (0)
+
+/* arithmetic ops */
+#define mips_add(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,32)
+#define mips_addi(c,dest,src1,imm) mips_format_i(c,8,src1,dest,imm)
+#define mips_addu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,33)
+#define mips_addiu(c,dest,src1,imm) mips_format_i(c,9,src1,dest,imm)
+#define mips_dadd(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,44)
+#define mips_daddi(c,dest,src1,imm) mips_format_i(c,24,src1,dest,imm)
+#define mips_daddu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,45)
+#define mips_daddiu(c,dest,src1,imm) mips_format_i(c,25,src1,dest,imm)
+#define mips_dsub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,46)
+#define mips_dsubu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,47)
+#define mips_mul(c,dest,src1,src2) mips_format_r(c,28,src1,src2,dest,0,2)
+#define mips_sub(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,34)
+#define mips_subu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,35)
+
+/* div and mul ops */
+#define mips_ddiv(c,src1,src2) mips_format_divmul(c,0,src1,src2,30)
+#define mips_ddivu(c,src1,src2) mips_format_divmul(c,0,src1,src2,31)
+#define mips_div(c,src1,src2) mips_format_divmul(c,0,src1,src2,26)
+#define mips_divu(c,src1,src2) mips_format_divmul(c,0,src1,src2,27)
+#define mips_dmult(c,src1,src2) mips_format_divmul(c,0,src1,src2,28)
+#define mips_dmultu(c,src1,src2) mips_format_divmul(c,0,src1,src2,29)
+#define mips_mult(c,src1,src2) mips_format_divmul(c,0,src1,src2,24)
+#define mips_multu(c,src1,src2) mips_format_divmul(c,0,src1,src2,25)
+
+/* shift ops */
+#define mips_dsll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,56)
+#define mips_dsll32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,60)
+#define mips_dsllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,20)
+#define mips_dsra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,59)
+#define mips_dsra32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,63)
+#define mips_dsrav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,23)
+#define mips_dsrl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,58)
+#define mips_dsrl32(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,62)
+#define mips_dsrlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,22)
+#define mips_sll(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,0)
+#define mips_sllv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,4)
+#define mips_sra(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,3)
+#define mips_srav(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,7)
+#define mips_srl(c,dest,src1,imm) mips_format_r(c,0,0,src1,dest,imm,2)
+#define mips_srlv(c,dest,src1,src2) mips_format_r(c,0,src2,src1,dest,0,6)
+
+/* logical ops */
+#define mips_and(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,36)
+#define mips_andi(c,dest,src1,imm) mips_format_i(c,12,src1,dest,imm)
+#define mips_nor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,39)
+#define mips_or(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,37)
+#define mips_ori(c,dest,src1,uimm) mips_format_i(c,13,src1,dest,uimm)
+#define mips_xor(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,38)
+#define mips_xori(c,dest,src1,uimm) mips_format_i(c,14,src1,dest,uimm)
+
+/* compares */
+#define mips_slt(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,42)
+#define mips_slti(c,dest,src1,imm) mips_format_i(c,10,src1,dest,imm)
+#define mips_sltiu(c,dest,src1,imm) mips_format_i(c,11,src1,dest,imm)
+#define mips_sltu(c,dest,src1,src2) mips_format_r(c,0,src1,src2,dest,0,43)
+/* missing traps: teq, teqi, tge, tgei, tgeiu, tgeu, tlt, tlti, tltiu, tltu, tne, tnei, */
+
+/* conditional branches */
+#define mips_beq(c,src1,src2,offset) mips_format_i(c,4,src1,src2,offset)
+#define mips_beql(c,src1,src2,offset) mips_format_i(c,20,src1,src2,offset)
+#define mips_bgez(c,src1,offset) mips_format_i(c,1,src1,1,offset)
+#define mips_bgezal(c,src1,offset) mips_format_i(c,1,src1,17,offset)
+#define mips_bgezall(c,src1,offset) mips_format_i(c,1,src1,19,offset)
+#define mips_bgezl(c,src1,offset) mips_format_i(c,1,src1,3,offset)
+#define mips_bgtz(c,src1,offset) mips_format_i(c,7,src1,0,offset)
+#define mips_bgtzl(c,src1,offset) mips_format_i(c,23,src1,0,offset)
+#define mips_blez(c,src1,offset) mips_format_i(c,6,src1,0,offset)
+#define mips_blezl(c,src1,offset) mips_format_i(c,22,src1,0,offset)
+#define mips_bltz(c,src1,offset) mips_format_i(c,1,src1,0,offset)
+#define mips_bltzal(c,src1,offset) mips_format_i(c,1,src1,16,offset)
+#define mips_bltzall(c,src1,offset) mips_format_i(c,1,src1,18,offset)
+#define mips_bltzl(c,src1,offset) mips_format_i(c,1,src1,2,offset)
+#define mips_bne(c,src1,src2,offset) mips_format_i(c,5,src1,src2,offset)
+#define mips_bnel(c,src1,src2,offset) mips_format_i(c,21,src1,src2,offset)
+
+/* uncond branches and calls */
+#define mips_jump(c,target) mips_format_j(c,2,target)
+#define mips_jumpl(c,target) mips_format_j(c,3,target)
+#define mips_jalr(c,src1,retreg) mips_format_r(c,0,src1,0,retreg,0,9)
+#define mips_jr(c,src1) mips_emit32(c,((src1)<<21)|8)
+
+/* loads and stores */
+#define mips_lb(c,dest,base,offset) mips_format_i(c,32,base,dest,offset)
+#define mips_lbu(c,dest,base,offset) mips_format_i(c,36,base,dest,offset)
+#define mips_ld(c,dest,base,offset) mips_format_i(c,55,base,dest,offset)
+#define mips_ldl(c,dest,base,offset) mips_format_i(c,26,base,dest,offset)
+#define mips_ldr(c,dest,base,offset) mips_format_i(c,27,base,dest,offset)
+#define mips_lh(c,dest,base,offset) mips_format_i(c,33,base,dest,offset)
+#define mips_lhu(c,dest,base,offset) mips_format_i(c,37,base,dest,offset)
+#define mips_ll(c,dest,base,offset) mips_format_i(c,48,base,dest,offset)
+#define mips_lld(c,dest,base,offset) mips_format_i(c,52,base,dest,offset)
+#define mips_lui(c,dest,base,uimm) mips_format_i(c,15,base,dest,uimm)
+#define mips_lw(c,dest,base,offset) mips_format_i(c,35,base,dest,offset)
+#define mips_lwl(c,dest,base,offset) mips_format_i(c,34,base,dest,offset)
+#define mips_lwr(c,dest,base,offset) mips_format_i(c,38,base,dest,offset)
+#define mips_lwu(c,dest,base,offset) mips_format_i(c,39,base,dest,offset)
+
+#define mips_sb(c,src,base,offset) mips_format_i(c,40,base,src,offset)
+#define mips_sc(c,src,base,offset) mips_format_i(c,56,base,src,offset)
+#define mips_scd(c,src,base,offset) mips_format_i(c,60,base,src,offset)
+#define mips_sd(c,src,base,offset) mips_format_i(c,63,base,src,offset)
+#define mips_sdl(c,src,base,offset) mips_format_i(c,44,base,src,offset)
+#define mips_sdr(c,src,base,offset) mips_format_i(c,45,base,src,offset)
+#define mips_sh(c,src,base,offset) mips_format_i(c,41,base,src,offset)
+#define mips_sw(c,src,base,offset) mips_format_i(c,43,base,src,offset)
+#define mips_swl(c,src,base,offset) mips_format_i(c,50,base,src,offset)
+#define mips_swr(c,src,base,offset) mips_format_i(c,54,base,src,offset)
+
+/* misc and coprocessor ops */
+#define mips_move(c,dest,src) mips_addu(c,dest,src,mips_zero)
+#define mips_dmove(c,dest,src) mips_daddu(c,dest,src,mips_zero)
+#define mips_nop(c) mips_or(c,mips_at,mips_at,0)
+#define mips_break(c,code) mips_emit32(c, ((code)<<6)|13)
+#define mips_mfhi(c,dest) mips_format_r(c,0,0,0,dest,0,16)
+#define mips_mflo(c,dest) mips_format_r(c,0,0,0,dest,0,18)
+#define mips_mthi(c,src) mips_format_r(c,0,src,0,0,0,17)
+#define mips_mtlo(c,src) mips_format_r(c,0,src,0,0,0,19)
+#define mips_movn(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,11)
+#define mips_movz(c,dest,src,test) mips_format_r(c,0,src,test,dest,0,10)
+#define mips_pref(c,hint,base,offset) mips_format_i(c,51,base,hint,offset)
+#define mips_prefidx(c,hint,base,idx) mips_format_r(c,19,base,idx,hint,0,15)
+#define mips_sync(c,stype) mips_emit32(c, ((stype)<<6)|15)
+#define mips_syscall(c,code) mips_emit32(c, ((code)<<6)|12)
+
+#define mips_cop(c,cop,fun) mips_emit32(c, ((16|(cop))<<26)|(fun))
+#define mips_ldc(c,cop,dest,base,offset) mips_format_i(c,(52|(cop)),base,dest,offset)
+#define mips_lwc(c,cop,dest,base,offset) mips_format_i(c,(48|(cop)),base,dest,offset)
+#define mips_sdc(c,cop,src,base,offset) mips_format_i(c,(60|(cop)),base,src,offset)
+#define mips_swc(c,cop,src,base,offset) mips_format_i(c,(56|(cop)),base,src,offset)
+#define mips_cfc1(c,dest,src) mips_format_r(c,17,2,dest,src,0,0)
+#define mips_ctc1(c,dest,src) mips_format_r(c,17,6,dest,src,0,0)
+
+/* fpu ops */
+#define mips_fabss(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,5)
+#define mips_fabsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,5)
+#define mips_fadds(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,0)
+#define mips_faddd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,0)
+#define mips_fdivs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,3)
+#define mips_fdivd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,3)
+#define mips_fmuls(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,2)
+#define mips_fmuld(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,2)
+#define mips_fnegs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,7)
+#define mips_fnegd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,7)
+#define mips_fsqrts(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,4)
+#define mips_fsqrtd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,4)
+#define mips_fsubs(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,dest,1)
+#define mips_fsubd(c,dest,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,dest,1)
+#define mips_madds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_SINGLE)
+#define mips_maddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,32|MIPS_FMT_DOUBLE)
+#define mips_nmadds(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_SINGLE)
+#define mips_nmaddd(c,dest,src1,src2,srcadd) mips_format_r(c,19,srcadd,src2,src1,dest,48|MIPS_FMT_DOUBLE)
+#define mips_msubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_SINGLE)
+#define mips_msubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,40|MIPS_FMT_DOUBLE)
+#define mips_nmsubs(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_SINGLE)
+#define mips_nmsubd(c,dest,src1,src2,srcsub) mips_format_r(c,19,srcsub,src2,src1,dest,56|MIPS_FMT_DOUBLE)
+
+/* fp compare and branch */
+#define mips_fcmps(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_SINGLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fcmpd(c,cond,src1,src2) mips_format_r(c,17,MIPS_FMT_DOUBLE,src2,src1,0,(3<<4)|(cond))
+#define mips_fbfalse(c,offset) mips_format_i(c,17,8,0,offset)
+#define mips_fbfalsel(c,offset) mips_format_i(c,17,8,2,offset)
+#define mips_fbtrue(c,offset) mips_format_i(c,17,8,1,offset)
+#define mips_fbtruel(c,offset) mips_format_i(c,17,8,3,offset)
+
+/* fp convert */
+#define mips_ceills(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,10)
+#define mips_ceilld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,10)
+#define mips_ceilws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,14)
+#define mips_ceilwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,14)
+#define mips_cvtds(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,33)
+#define mips_cvtdw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,33)
+#define mips_cvtdl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,33)
+#define mips_cvtls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,37)
+#define mips_cvtld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,37)
+#define mips_cvtsd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,32)
+#define mips_cvtsw(c,dest,src) mips_format_r(c,17,MIPS_FMT_WORD,0,src,dest,32)
+#define mips_cvtsl(c,dest,src) mips_format_r(c,17,MIPS_FMT_LONG,0,src,dest,32)
+#define mips_cvtws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,36)
+#define mips_cvtwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,36)
+#define mips_floorls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,11)
+#define mips_floorld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,11)
+#define mips_floorws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,15)
+#define mips_floorwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,15)
+#define mips_roundls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,8)
+#define mips_roundld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,8)
+#define mips_roundws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,12)
+#define mips_roundwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,12)
+#define mips_truncls(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,9)
+#define mips_truncld(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,9)
+#define mips_truncws(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,13)
+#define mips_truncwd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,13)
+
+/* fp moves, loads */
+#define mips_fmovs(c,dest,src) mips_format_r(c,17,MIPS_FMT_SINGLE,0,src,dest,6)
+#define mips_fmovd(c,dest,src) mips_format_r(c,17,MIPS_FMT_DOUBLE,0,src,dest,6)
+#define mips_mfc1(c,dest,src) mips_format_r(c,17,0,dest,src,0,0)
+#define mips_mtc1(c,dest,src) mips_format_r(c,17,4,src,dest,0,0)
+#define mips_dmfc1(c,dest,src) mips_format_r(c,17,1,0,dest,src,0)
+#define mips_dmtc1(c,dest,src) mips_format_r(c,17,1,0,src,dest,0)
+#define mips_ldc1(c,dest,base,offset) mips_ldc(c,1,dest,base,offset)
+#define mips_ldxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,1)
+#define mips_lwc1(c,dest,base,offset) mips_lwc(c,1,dest,base,offset)
+#define mips_lwxc1(c,dest,base,idx) mips_format_r(c,19,base,idx,0,dest,0)
+#define mips_sdc1(c,src,base,offset) mips_sdc(c,1,src,base,offset)
+#define mips_sdxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,9)
+#define mips_swc1(c,src,base,offset) mips_swc(c,1,src,base,offset)
+#define mips_swxc1(c,src,base,idx) mips_format_r(c,19,base,idx,src,0,8)
+
+#endif /* __MIPS_CODEGEN_H__ */
+
diff --git a/src/arch/mips/test.c b/src/arch/mips/test.c
new file mode 100644
index 0000000..4f5e1ad
--- /dev/null
+++ b/src/arch/mips/test.c
@@ -0,0 +1,159 @@
+#include "config.h"
+#include <stdlib.h>
+#include <string.h>
+
+#define NO_MIPS_JIT_DEBUG
+
+#include "mips-codegen.h"
+#include "mono/metadata/class.h"
+
+/* don't run the resulting program, it will destroy your computer,
+ * just objdump -d it to inspect we generated the correct assembler.
+ */
+
+int main (int argc, char *argv[]) {
+ guint32 *code, * p;
+
+ code = p = (guint32 *) malloc (sizeof (guint32) * 1024);
+
+ mips_add (p, 3, 4, 5);
+ mips_addi (p, 3, 4, 5);
+ mips_addu (p, 3, 4, 5);
+ mips_addiu (p, 3, 4, 5);
+ mips_sub (p, 3, 4, 5);
+ mips_subu (p, 3, 4, 5);
+ mips_dadd (p, 3, 4, 5);
+ mips_daddi (p, 3, 4, 5);
+ mips_daddu (p, 3, 4, 5);
+ mips_daddiu (p, 3, 4, 5);
+ mips_dsub (p, 3, 4, 5);
+ mips_dsubu (p, 3, 4, 5);
+
+ mips_mult (p, 6, 7);
+ mips_multu (p, 6, 7);
+ mips_div (p, 6, 7);
+ mips_divu (p, 6, 7);
+ mips_dmult (p, 6, 7);
+ mips_dmultu (p, 6, 7);
+ mips_ddiv (p, 6, 7);
+ mips_ddivu (p, 6, 7);
+
+ mips_sll (p, 3, 4, 5);
+ mips_sllv (p, 3, 4, 5);
+ mips_sra (p, 3, 4, 5);
+ mips_srav (p, 3, 4, 5);
+ mips_srl (p, 3, 4, 5);
+ mips_srlv (p, 3, 4, 5);
+ mips_dsll (p, 3, 4, 5);
+ mips_dsll32 (p, 3, 4, 5);
+ mips_dsllv (p, 3, 4, 5);
+ mips_dsra (p, 3, 4, 5);
+ mips_dsra32 (p, 3, 4, 5);
+ mips_dsrav (p, 3, 4, 5);
+ mips_dsrl (p, 3, 4, 5);
+ mips_dsrl32 (p, 3, 4, 5);
+ mips_dsrlv (p, 3, 4, 5);
+
+ mips_and (p, 8, 9, 10);
+ mips_andi (p, 8, 9, 10);
+ mips_nor (p, 8, 9, 10);
+ mips_or (p, 8, 9, 10);
+ mips_ori (p, 8, 9, 10);
+ mips_xor (p, 8, 9, 10);
+ mips_xori (p, 8, 9, 10);
+
+ mips_slt (p, 8, 9, 10);
+ mips_slti (p, 8, 9, 10);
+ mips_sltu (p, 8, 9, 10);
+ mips_sltiu (p, 8, 9, 10);
+
+ mips_beq (p, 8, 9, 0xff1f);
+ mips_beql (p, 8, 9, 0xff1f);
+ mips_bne (p, 8, 9, 0xff1f);
+ mips_bnel (p, 8, 9, 0xff1f);
+ mips_bgez (p, 11, 0xff1f);
+ mips_bgezal (p, 11, 0xff1f);
+ mips_bgezall (p, 11, 0xff1f);
+ mips_bgezl (p, 11, 0xff1f);
+ mips_bgtz (p, 11, 0xff1f);
+ mips_bgtzl (p, 11, 0xff1f);
+ mips_blez (p, 11, 0xff1f);
+ mips_blezl (p, 11, 0xff1f);
+ mips_bltz (p, 11, 0xff1f);
+ mips_bltzal (p, 11, 0xff1f);
+ mips_bltzall (p, 11, 0xff1f);
+ mips_bltzl (p, 11, 0xff1f);
+
+ mips_jump (p, 0xff1f);
+ mips_jumpl (p, 0xff1f);
+ mips_jalr (p, 12, mips_ra);
+ mips_jr (p, 12);
+
+ mips_lb (p, 13, 14, 128);
+ mips_lbu (p, 13, 14, 128);
+ mips_ld (p, 13, 14, 128);
+ mips_ldl (p, 13, 14, 128);
+ mips_ldr (p, 13, 14, 128);
+ mips_lh (p, 13, 14, 128);
+ mips_lhu (p, 13, 14, 128);
+ mips_ll (p, 13, 14, 128);
+ mips_lld (p, 13, 14, 128);
+ mips_lui (p, 13, 14, 128);
+ mips_lw (p, 13, 14, 128);
+ mips_lwl (p, 13, 14, 128);
+ mips_lwr (p, 13, 14, 128);
+ mips_lwu (p, 13, 14, 128);
+ mips_sb (p, 13, 14, 128);
+ mips_sc (p, 13, 14, 128);
+ mips_scd (p, 13, 14, 128);
+ mips_sd (p, 13, 14, 128);
+ mips_sdl (p, 13, 14, 128);
+ mips_sdr (p, 13, 14, 128);
+ mips_sh (p, 13, 14, 128);
+ mips_sw (p, 13, 14, 128);
+ mips_swl (p, 13, 14, 128);
+ mips_swr (p, 13, 14, 128);
+
+ mips_move (p, 15, 16);
+ mips_nop (p);
+ mips_break (p, 0);
+ mips_sync (p, 0);
+ mips_mfhi (p, 17);
+ mips_mflo (p, 17);
+ mips_mthi (p, 17);
+ mips_mtlo (p, 17);
+
+ mips_fabsd (p, 16, 18);
+ mips_fnegd (p, 16, 18);
+ mips_fsqrtd (p, 16, 18);
+ mips_faddd (p, 16, 18, 20);
+ mips_fdivd (p, 16, 18, 20);
+ mips_fmuld (p, 16, 18, 20);
+ mips_fsubd (p, 16, 18, 20);
+
+ mips_fcmpd (p, MIPS_FPU_EQ, 18, 20);
+ mips_fbfalse (p, 0xff1f);
+ mips_fbfalsel (p, 0xff1f);
+ mips_fbtrue (p, 0xff1f);
+ mips_fbtruel (p, 0xff1f);
+
+ mips_ceilwd (p, 20, 22);
+ mips_ceilld (p, 20, 22);
+ mips_floorwd (p, 20, 22);
+ mips_floorld (p, 20, 22);
+ mips_roundwd (p, 20, 22);
+ mips_roundld (p, 20, 22);
+ mips_truncwd (p, 20, 22);
+ mips_truncld (p, 20, 22);
+ mips_cvtdw (p, 20, 22);
+ mips_cvtds (p, 20, 22);
+ mips_cvtdl (p, 20, 22);
+ mips_cvtld (p, 20, 22);
+ mips_cvtsd (p, 20, 22);
+ mips_cvtwd (p, 20, 22);
+
+ mips_fmovd (p, 20, 22);
+ printf ("size: %d\n", p - code);
+
+ return 0;
+}
diff --git a/src/arch/ppc/.gitignore b/src/arch/ppc/.gitignore
new file mode 100644
index 0000000..c577ff6
--- /dev/null
+++ b/src/arch/ppc/.gitignore
@@ -0,0 +1,7 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
+/test
diff --git a/src/arch/ppc/Makefile.am b/src/arch/ppc/Makefile.am
new file mode 100644
index 0000000..9b209ef
--- /dev/null
+++ b/src/arch/ppc/Makefile.am
@@ -0,0 +1 @@
+EXTRA_DIST = ppc-codegen.h \ No newline at end of file
diff --git a/src/arch/ppc/ppc-codegen.h b/src/arch/ppc/ppc-codegen.h
new file mode 100644
index 0000000..55b5060
--- /dev/null
+++ b/src/arch/ppc/ppc-codegen.h
@@ -0,0 +1,953 @@
+/*
+ Authors:
+ Radek Doulik
+ Christopher Taylor <ct_AT_clemson_DOT_edu>
+ Andreas Faerber <andreas.faerber@web.de>
+
+ Copyright (C) 2001 Radek Doulik
+ Copyright (C) 2007-2008 Andreas Faerber
+
+ for testing do the following: ./test | as -o test.o
+*/
+
+#ifndef __MONO_PPC_CODEGEN_H__
+#define __MONO_PPC_CODEGEN_H__
+#include <glib.h>
+#include <assert.h>
+
+typedef enum {
+ ppc_r0 = 0,
+ ppc_r1,
+ ppc_sp = ppc_r1,
+ ppc_r2,
+ ppc_r3,
+ ppc_r4,
+ ppc_r5,
+ ppc_r6,
+ ppc_r7,
+ ppc_r8,
+ ppc_r9,
+ ppc_r10,
+ ppc_r11,
+ ppc_r12,
+ ppc_r13,
+ ppc_r14,
+ ppc_r15,
+ ppc_r16,
+ ppc_r17,
+ ppc_r18,
+ ppc_r19,
+ ppc_r20,
+ ppc_r21,
+ ppc_r22,
+ ppc_r23,
+ ppc_r24,
+ ppc_r25,
+ ppc_r26,
+ ppc_r27,
+ ppc_r28,
+ ppc_r29,
+ ppc_r30,
+ ppc_r31
+} PPCIntRegister;
+
+typedef enum {
+ ppc_f0 = 0,
+ ppc_f1,
+ ppc_f2,
+ ppc_f3,
+ ppc_f4,
+ ppc_f5,
+ ppc_f6,
+ ppc_f7,
+ ppc_f8,
+ ppc_f9,
+ ppc_f10,
+ ppc_f11,
+ ppc_f12,
+ ppc_f13,
+ ppc_f14,
+ ppc_f15,
+ ppc_f16,
+ ppc_f17,
+ ppc_f18,
+ ppc_f19,
+ ppc_f20,
+ ppc_f21,
+ ppc_f22,
+ ppc_f23,
+ ppc_f24,
+ ppc_f25,
+ ppc_f26,
+ ppc_f27,
+ ppc_f28,
+ ppc_f29,
+ ppc_f30,
+ ppc_f31
+} PPCFloatRegister;
+
+typedef enum {
+ ppc_lr = 256,
+ ppc_ctr = 256 + 32,
+ ppc_xer = 32
+} PPCSpecialRegister;
+
+enum {
+ /* B0 operand for branches */
+ PPC_BR_DEC_CTR_NONZERO_FALSE = 0,
+ PPC_BR_LIKELY = 1, /* can be or'ed with the conditional variants */
+ PPC_BR_DEC_CTR_ZERO_FALSE = 2,
+ PPC_BR_FALSE = 4,
+ PPC_BR_DEC_CTR_NONZERO_TRUE = 8,
+ PPC_BR_DEC_CTR_ZERO_TRUE = 10,
+ PPC_BR_TRUE = 12,
+ PPC_BR_DEC_CTR_NONZERO = 16,
+ PPC_BR_DEC_CTR_ZERO = 18,
+ PPC_BR_ALWAYS = 20,
+ /* B1 operand for branches */
+ PPC_BR_LT = 0,
+ PPC_BR_GT = 1,
+ PPC_BR_EQ = 2,
+ PPC_BR_SO = 3
+};
+
+enum {
+ PPC_TRAP_LT = 1,
+ PPC_TRAP_GT = 2,
+ PPC_TRAP_EQ = 4,
+ PPC_TRAP_LT_UN = 8,
+ PPC_TRAP_GT_UN = 16,
+ PPC_TRAP_LE = 1 + PPC_TRAP_EQ,
+ PPC_TRAP_GE = 2 + PPC_TRAP_EQ,
+ PPC_TRAP_LE_UN = 8 + PPC_TRAP_EQ,
+ PPC_TRAP_GE_UN = 16 + PPC_TRAP_EQ
+};
+
+#define ppc_emit32(c,x) do { *((guint32 *) (c)) = GUINT32_TO_BE (x); (c) = (gpointer)((guint8 *)(c) + sizeof (guint32));} while (0)
+
+#define ppc_is_imm16(val) ((((val)>> 15) == 0) || (((val)>> 15) == -1))
+#define ppc_is_uimm16(val) ((glong)(val) >= 0L && (glong)(val) <= 65535L)
+#define ppc_ha(val) (((val >> 16) + ((val & 0x8000) ? 1 : 0)) & 0xffff)
+
+#define ppc_load32(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), (guint32)(v) >> 16); \
+ ppc_ori ((c), (D), (D), (guint32)(v) & 0xffff); \
+ } G_STMT_END
+
+/* Macros to load/store pointer sized quantities */
+
+#if defined(__mono_ppc64__) && !defined(__mono_ilp32__)
+
+#define ppc_ldptr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_ldu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_ldux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+
+#else
+
+/* Same as ppc32 */
+#define ppc_ldptr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldptr_update(c,D,d,A) ppc_lwzu ((c), (D), (d), (A))
+#define ppc_ldptr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_ldptr_update_indexed(c,D,A,B) ppc_lwzux ((c), (D), (A), (B))
+
+#define ppc_stptr(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_stptr_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_stptr_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_stptr_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+
+#endif
+
+/* Macros to load pointer sized immediates */
+#define ppc_load_ptr(c,D,v) ppc_load ((c),(D),(gsize)(v))
+#define ppc_load_ptr_sequence(c,D,v) ppc_load_sequence ((c),(D),(gsize)(v))
+
+/* Macros to load/store regsize quantities */
+
+#ifdef __mono_ppc64__
+#define ppc_ldr(c,D,d,A) ppc_ld ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_ldx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_std ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stdu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stdx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stdux ((c), (S), (A), (B))
+#else
+#define ppc_ldr(c,D,d,A) ppc_lwz ((c), (D), (d), (A))
+#define ppc_ldr_indexed(c,D,A,B) ppc_lwzx ((c), (D), (A), (B))
+#define ppc_str(c,S,d,A) ppc_stw ((c), (S), (d), (A))
+#define ppc_str_update(c,S,d,A) ppc_stwu ((c), (S), (d), (A))
+#define ppc_str_indexed(c,S,A,B) ppc_stwx ((c), (S), (A), (B))
+#define ppc_str_update_indexed(c,S,A,B) ppc_stwux ((c), (S), (A), (B))
+#endif
+
+#define ppc_str_multiple(c,S,d,A) ppc_store_multiple_regs((c),(S),(d),(A))
+#define ppc_ldr_multiple(c,D,d,A) ppc_load_multiple_regs((c),(D),(d),(A))
+
+/* PPC32 macros */
+
+#ifndef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) ppc_load32 ((c), (D), (guint32)(v))
+
+#define PPC_LOAD_SEQUENCE_LENGTH 8
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint32)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint32)(v)); \
+ } else { \
+ ppc_load32 ((c), (D), (guint32)(v)); \
+ } \
+ } G_STMT_END
+
+#define ppc_load_func(c,D,V) ppc_load_sequence ((c), (D), (V))
+
+#define ppc_load_multiple_regs(c,D,d,A) ppc_lmw ((c), (D), (d), (A))
+
+#define ppc_store_multiple_regs(c,S,d,A) ppc_stmw ((c), (S), (d), (A))
+
+#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 0, (A), (B))
+#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 0, (A), (B))
+#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 0, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_slw((c), (S), (A), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_slwi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srwi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_srawi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mullw((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrwi((c), (A), (S), (n))
+
+#endif
+
+#define ppc_opcode(c) ((c) >> 26)
+#define ppc_split_5_1_1(x) (((x) >> 5) & 0x1)
+#define ppc_split_5_1_5(x) ((x) & 0x1F)
+#define ppc_split_5_1(x) ((ppc_split_5_1_5(x) << 1) | ppc_split_5_1_1(x))
+
+#define ppc_break(c) ppc_tw((c),31,0,0)
+#define ppc_addi(c,D,A,i) ppc_emit32 (c, (14 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addis(c,D,A,i) ppc_emit32 (c, (15 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_li(c,D,v) ppc_addi (c, D, 0, (guint16)(v))
+#define ppc_lis(c,D,v) ppc_addis (c, D, 0, (guint16)(v))
+#define ppc_lwz(c,D,d,A) ppc_emit32 (c, (32 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lhz(c,D,d,A) ppc_emit32 (c, (40 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lbz(c,D,d,A) ppc_emit32 (c, (34 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stw(c,S,d,A) ppc_emit32 (c, (36 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_sth(c,S,d,A) ppc_emit32 (c, (44 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stb(c,S,d,A) ppc_emit32 (c, (38 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stwu(c,s,d,A) ppc_emit32 (c, (37 << 26) | ((s) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_or(c,a,s,b) ppc_emit32 (c, (31 << 26) | ((s) << 21) | ((a) << 16) | ((b) << 11) | 888)
+#define ppc_mr(c,a,s) ppc_or (c, a, s, s)
+#define ppc_ori(c,S,A,ui) ppc_emit32 (c, (24 << 26) | ((S) << 21) | ((A) << 16) | (guint16)(ui))
+#define ppc_nop(c) ppc_ori (c, 0, 0, 0)
+#define ppc_mfspr(c,D,spr) ppc_emit32 (c, (31 << 26) | ((D) << 21) | ((spr) << 11) | (339 << 1))
+#define ppc_mflr(c,D) ppc_mfspr (c, D, ppc_lr)
+#define ppc_mtspr(c,spr,S) ppc_emit32 (c, (31 << 26) | ((S) << 21) | ((spr) << 11) | (467 << 1))
+#define ppc_mtlr(c,S) ppc_mtspr (c, ppc_lr, S)
+#define ppc_mtctr(c,S) ppc_mtspr (c, ppc_ctr, S)
+#define ppc_mtxer(c,S) ppc_mtspr (c, ppc_xer, S)
+
+#define ppc_b(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2))
+#define ppc_bl(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 1)
+#define ppc_ba(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 2)
+#define ppc_bla(c,li) ppc_emit32 (c, (18 << 26) | ((li) << 2) | 3)
+#define ppc_blrl(c) ppc_emit32 (c, 0x4e800021)
+#define ppc_blr(c) ppc_emit32 (c, 0x4e800020)
+
+#define ppc_lfs(c,D,d,A) ppc_emit32 (c, (48 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_lfd(c,D,d,A) ppc_emit32 (c, (50 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(d))
+#define ppc_stfs(c,S,d,a) ppc_emit32 (c, (52 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+#define ppc_stfd(c,S,d,a) ppc_emit32 (c, (54 << 26) | ((S) << 21) | ((a) << 16) | (guint16)(d))
+
+/***********************************************************************
+The macros below were tapped out by Christopher Taylor <ct_AT_clemson_DOT_edu>
+from 18 November 2002 to 19 December 2002.
+
+Special thanks to rodo, lupus, dietmar, miguel, and duncan for patience,
+and motivation.
+
+The macros found in this file are based on the assembler instructions found
+in Motorola and Digital DNA's:
+
+"Programming Environments Manual For 32-bit Implementations of the PowerPC Architecture"
+
+MPCFPE32B/AD
+12/2001
+REV2
+
+see pages 326 - 524 for detailed information regarding each instruction
+
+Also see the "Ximian Copyright Agreement, 2002" for more information regarding
+my and Ximian's copyright to this code. ;)
+*************************************************************************/
+
+#define ppc_addx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (266 << 1) | Rc)
+#define ppc_add(c,D,A,B) ppc_addx(c,D,A,B,0,0)
+#define ppc_addd(c,D,A,B) ppc_addx(c,D,A,B,0,1)
+#define ppc_addo(c,D,A,B) ppc_addx(c,D,A,B,1,0)
+#define ppc_addod(c,D,A,B) ppc_addx(c,D,A,B,1,1)
+
+#define ppc_addcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (10 << 1) | Rc)
+#define ppc_addc(c,D,A,B) ppc_addcx(c,D,A,B,0,0)
+#define ppc_addcd(c,D,A,B) ppc_addcx(c,D,A,B,0,1)
+#define ppc_addco(c,D,A,B) ppc_addcx(c,D,A,B,1,0)
+#define ppc_addcod(c,D,A,B) ppc_addcx(c,D,A,B,1,1)
+
+#define ppc_addex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (OE << 10) | (138 << 1) | Rc)
+#define ppc_adde(c,D,A,B) ppc_addex(c,D,A,B,0,0)
+#define ppc_added(c,D,A,B) ppc_addex(c,D,A,B,0,1)
+#define ppc_addeo(c,D,A,B) ppc_addex(c,D,A,B,1,0)
+#define ppc_addeod(c,D,A,B) ppc_addex(c,D,A,B,1,1)
+
+#define ppc_addic(c,D,A,i) ppc_emit32(c, (12 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+#define ppc_addicd(c,D,A,i) ppc_emit32(c, (13 << 26) | ((D) << 21) | ((A) << 16) | (guint16)(i))
+
+#define ppc_addmex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (234 << 1) | RC)
+#define ppc_addme(c,D,A) ppc_addmex(c,D,A,0,0)
+#define ppc_addmed(c,D,A) ppc_addmex(c,D,A,0,1)
+#define ppc_addmeo(c,D,A) ppc_addmex(c,D,A,1,0)
+#define ppc_addmeod(c,D,A) ppc_addmex(c,D,A,1,1)
+
+#define ppc_addzex(c,D,A,OE,RC) ppc_emit32(c, (31 << 26) | ((D) << 21 ) | ((A) << 16) | (0 << 11) | ((OE) << 10) | (202 << 1) | RC)
+#define ppc_addze(c,D,A) ppc_addzex(c,D,A,0,0)
+#define ppc_addzed(c,D,A) ppc_addzex(c,D,A,0,1)
+#define ppc_addzeo(c,D,A) ppc_addzex(c,D,A,1,0)
+#define ppc_addzeod(c,D,A) ppc_addzex(c,D,A,1,1)
+
+#define ppc_andx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (28 << 1) | RC)
+#define ppc_and(c,S,A,B) ppc_andx(c,S,A,B,0)
+#define ppc_andd(c,S,A,B) ppc_andx(c,S,A,B,1)
+
+#define ppc_andcx(c,S,A,B,RC) ppc_emit32(c, (31 << 26) | ((S) << 21 ) | ((A) << 16) | ((B) << 11) | (60 << 1) | RC)
+#define ppc_andc(c,S,A,B) ppc_andcx(c,S,A,B,0)
+#define ppc_andcd(c,S,A,B) ppc_andcx(c,S,A,B,1)
+
+#define ppc_andid(c,S,A,ui) ppc_emit32(c, (28 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+#define ppc_andisd(c,S,A,ui) ppc_emit32(c, (29 << 26) | ((S) << 21 ) | ((A) << 16) | ((guint16)(ui)))
+
+#define ppc_bcx(c,BO,BI,BD,AA,LK) ppc_emit32(c, (16 << 26) | (BO << 21 )| (BI << 16) | (BD << 2) | ((AA) << 1) | LK)
+#define ppc_bc(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,0)
+#define ppc_bca(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,0)
+#define ppc_bcl(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,0,1)
+#define ppc_bcla(c,BO,BI,BD) ppc_bcx(c,BO,BI,BD,1,1)
+
+#define ppc_bcctrx(c,BO,BI,LK) ppc_emit32(c, (19 << 26) | (BO << 21 )| (BI << 16) | (0 << 11) | (528 << 1) | LK)
+#define ppc_bcctr(c,BO,BI) ppc_bcctrx(c,BO,BI,0)
+#define ppc_bcctrl(c,BO,BI) ppc_bcctrx(c,BO,BI,1)
+
+#define ppc_bnectrp(c,BO,BI) ppc_bcctr(c,BO,BI)
+#define ppc_bnectrlp(c,BO,BI) ppc_bcctr(c,BO,BI)
+
+#define ppc_bclrx(c,BO,BI,BH,LK) ppc_emit32(c, (19 << 26) | ((BO) << 21 )| ((BI) << 16) | (0 << 13) | ((BH) << 11) | (16 << 1) | (LK))
+#define ppc_bclr(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,0)
+#define ppc_bclrl(c,BO,BI,BH) ppc_bclrx(c,BO,BI,BH,1)
+
+#define ppc_bnelrp(c,BO,BI) ppc_bclr(c,BO,BI,0)
+#define ppc_bnelrlp(c,BO,BI) ppc_bclr(c,BO,BI,0)
+
+#define ppc_cmp(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (0 << 1) | 0)
+#define ppc_cmpi(c,cfrD,L,A,B) ppc_emit32(c, (11 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpl(c,cfrD,L,A,B) ppc_emit32(c, (31 << 26) | ((cfrD) << 23) | (0 << 22) | ((L) << 21) | ((A) << 16) | ((B) << 11) | (32 << 1) | 0)
+#define ppc_cmpli(c,cfrD,L,A,B) ppc_emit32(c, (10 << 26) | (cfrD << 23) | (0 << 22) | (L << 21) | (A << 16) | (guint16)(B))
+#define ppc_cmpw(c,cfrD,A,B) ppc_cmp(c, (cfrD), 0, (A), (B))
+
+#define ppc_cntlzwx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (26 << 1) | Rc)
+#define ppc_cntlzw(c,S,A) ppc_cntlzwx(c,S,A,0)
+#define ppc_cntlzwd(c,S,A) ppc_cntlzwx(c,S,A,1)
+
+#define ppc_crand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (257 << 1) | 0)
+#define ppc_crandc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (129 << 1) | 0)
+#define ppc_creqv(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (289 << 1) | 0)
+#define ppc_crnand(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (225 << 1) | 0)
+#define ppc_crnor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (33 << 1) | 0)
+#define ppc_cror(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (449 << 1) | 0)
+#define ppc_crorc(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (417 << 1) | 0)
+#define ppc_crxor(c,D,A,B) ppc_emit32(c, (19 << 26) | (D << 21) | (A << 16) | (B << 11) | (193 << 1) | 0)
+
+#define ppc_dcba(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (758 << 1) | 0)
+#define ppc_dcbf(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (86 << 1) | 0)
+#define ppc_dcbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (470 << 1) | 0)
+#define ppc_dcbst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (54 << 1) | 0)
+#define ppc_dcbt(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (278 << 1) | 0)
+#define ppc_dcbtst(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (246 << 1) | 0)
+#define ppc_dcbz(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (1014 << 1) | 0)
+
+#define ppc_divwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (491 << 1) | Rc)
+#define ppc_divw(c,D,A,B) ppc_divwx(c,D,A,B,0,0)
+#define ppc_divwd(c,D,A,B) ppc_divwx(c,D,A,B,0,1)
+#define ppc_divwo(c,D,A,B) ppc_divwx(c,D,A,B,1,0)
+#define ppc_divwod(c,D,A,B) ppc_divwx(c,D,A,B,1,1)
+
+#define ppc_divwux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (459 << 1) | Rc)
+#define ppc_divwu(c,D,A,B) ppc_divwux(c,D,A,B,0,0)
+#define ppc_divwud(c,D,A,B) ppc_divwux(c,D,A,B,0,1)
+#define ppc_divwuo(c,D,A,B) ppc_divwux(c,D,A,B,1,0)
+#define ppc_divwuod(c,D,A,B) ppc_divwux(c,D,A,B,1,1)
+
+#define ppc_eciwx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (310 << 1) | 0)
+#define ppc_ecowx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (438 << 1) | 0)
+#define ppc_eieio(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (854 << 1) | 0)
+
+#define ppc_eqvx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (284 << 1) | Rc)
+#define ppc_eqv(c,A,S,B) ppc_eqvx(c,A,S,B,0)
+#define ppc_eqvd(c,A,S,B) ppc_eqvx(c,A,S,B,1)
+
+#define ppc_extsbx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (954 << 1) | Rc)
+#define ppc_extsb(c,A,S) ppc_extsbx(c,A,S,0)
+#define ppc_extsbd(c,A,S) ppc_extsbx(c,A,S,1)
+
+#define ppc_extshx(c,A,S,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (0 << 11) | (922 << 1) | Rc)
+#define ppc_extsh(c,A,S) ppc_extshx(c,A,S,0)
+#define ppc_extshd(c,A,S) ppc_extshx(c,A,S,1)
+
+#define ppc_fabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (264 << 1) | Rc)
+#define ppc_fabs(c,D,B) ppc_fabsx(c,D,B,0)
+#define ppc_fabsd(c,D,B) ppc_fabsx(c,D,B,1)
+
+#define ppc_faddx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadd(c,D,A,B) ppc_faddx(c,D,A,B,0)
+#define ppc_faddd(c,D,A,B) ppc_faddx(c,D,A,B,1)
+
+#define ppc_faddsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (21 << 1) | Rc)
+#define ppc_fadds(c,D,A,B) ppc_faddsx(c,D,A,B,0)
+#define ppc_faddsd(c,D,A,B) ppc_faddsx(c,D,A,B,1)
+
+#define ppc_fcmpo(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (32 << 1) | 0)
+#define ppc_fcmpu(c,crfD,A,B) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (A << 16) | (B << 11) | (0 << 1) | 0)
+
+#define ppc_fctiwx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (14 << 1) | Rc)
+#define ppc_fctiw(c,D,B) ppc_fctiwx(c,D,B,0)
+#define ppc_fctiwd(c,D,B) ppc_fctiwx(c,D,B,1)
+
+#define ppc_fctiwzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (15 << 1) | Rc)
+#define ppc_fctiwz(c,D,B) ppc_fctiwzx(c,D,B,0)
+#define ppc_fctiwzd(c,D,B) ppc_fctiwzx(c,D,B,1)
+
+#define ppc_fdivx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdiv(c,D,A,B) ppc_fdivx(c,D,A,B,0)
+#define ppc_fdivd(c,D,A,B) ppc_fdivx(c,D,A,B,1)
+
+#define ppc_fdivsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (18 << 1) | Rc)
+#define ppc_fdivs(c,D,A,B) ppc_fdivsx(c,D,A,B,0)
+#define ppc_fdivsd(c,D,A,B) ppc_fdivsx(c,D,A,B,1)
+
+#define ppc_fmaddx(c,D,A,B,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,0)
+#define ppc_fmaddd(c,D,A,B,C) ppc_fmaddx(c,D,A,B,C,1)
+
+#define ppc_fmaddsx(c,D,A,B,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (29 << 1) | Rc)
+#define ppc_fmadds(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,0)
+#define ppc_fmaddsd(c,D,A,B,C) ppc_fmaddsx(c,D,A,B,C,1)
+
+#define ppc_fmrx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (72 << 1) | Rc)
+#define ppc_fmr(c,D,B) ppc_fmrx(c,D,B,0)
+#define ppc_fmrd(c,D,B) ppc_fmrx(c,D,B,1)
+
+#define ppc_fmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsub(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,0)
+#define ppc_fmsubd(c,D,A,C,B) ppc_fmsubx(c,D,A,C,B,1)
+
+#define ppc_fmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (28 << 1) | Rc)
+#define ppc_fmsubs(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,0)
+#define ppc_fmsubsd(c,D,A,C,B) ppc_fmsubsx(c,D,A,C,B,1)
+
+#define ppc_fmulx(c,D,A,C,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmul(c,D,A,C) ppc_fmulx(c,D,A,C,0)
+#define ppc_fmuld(c,D,A,C) ppc_fmulx(c,D,A,C,1)
+
+#define ppc_fmulsx(c,D,A,C,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (0 << 11) | (C << 6) | (25 << 1) | Rc)
+#define ppc_fmuls(c,D,A,C) ppc_fmulsx(c,D,A,C,0)
+#define ppc_fmulsd(c,D,A,C) ppc_fmulsx(c,D,A,C,1)
+
+#define ppc_fnabsx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (136 << 1) | Rc)
+#define ppc_fnabs(c,D,B) ppc_fnabsx(c,D,B,0)
+#define ppc_fnabsd(c,D,B) ppc_fnabsx(c,D,B,1)
+
+#define ppc_fnegx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (40 << 1) | Rc)
+#define ppc_fneg(c,D,B) ppc_fnegx(c,D,B,0)
+#define ppc_fnegd(c,D,B) ppc_fnegx(c,D,B,1)
+
+#define ppc_fnmaddx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,0)
+#define ppc_fnmaddd(c,D,A,C,B) ppc_fnmaddx(c,D,A,C,B,1)
+
+#define ppc_fnmaddsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (31 << 1) | Rc)
+#define ppc_fnmadds(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,0)
+#define ppc_fnmaddsd(c,D,A,C,B) ppc_fnmaddsx(c,D,A,C,B,1)
+
+#define ppc_fnmsubx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsub(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,0)
+#define ppc_fnmsubd(c,D,A,C,B) ppc_fnmsubx(c,D,A,C,B,1)
+
+#define ppc_fnmsubsx(c,D,A,C,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (30 << 1) | Rc)
+#define ppc_fnmsubs(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,0)
+#define ppc_fnmsubsd(c,D,A,C,B) ppc_fnmsubsx(c,D,A,C,B,1)
+
+#define ppc_fresx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (24 << 1) | Rc)
+#define ppc_fres(c,D,B) ppc_fresx(c,D,B,0)
+#define ppc_fresd(c,D,B) ppc_fresx(c,D,B,1)
+
+#define ppc_frspx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (12 << 1) | Rc)
+#define ppc_frsp(c,D,B) ppc_frspx(c,D,B,0)
+#define ppc_frspd(c,D,B) ppc_frspx(c,D,B,1)
+
+#define ppc_frsqrtex(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (26 << 1) | Rc)
+#define ppc_frsqrte(c,D,B) ppc_frsqrtex(c,D,B,0)
+#define ppc_frsqrted(c,D,B) ppc_frsqrtex(c,D,B,1)
+
+#define ppc_fselx(c,D,A,C,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (C << 6) | (23 << 1) | Rc)
+#define ppc_fsel(c,D,A,C,B) ppc_fselx(c,D,A,C,B,0)
+#define ppc_fseld(c,D,A,C,B) ppc_fselx(c,D,A,C,B,1)
+
+#define ppc_fsqrtx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrt(c,D,B) ppc_fsqrtx(c,D,B,0)
+#define ppc_fsqrtd(c,D,B) ppc_fsqrtx(c,D,B,1)
+
+#define ppc_fsqrtsx(c,D,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (0 << 16) | (B << 11) | (0 << 6) | (22 << 1) | Rc)
+#define ppc_fsqrts(c,D,B) ppc_fsqrtsx(c,D,B,0)
+#define ppc_fsqrtsd(c,D,B) ppc_fsqrtsx(c,D,B,1)
+
+#define ppc_fsubx(c,D,A,B,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsub(c,D,A,B) ppc_fsubx(c,D,A,B,0)
+#define ppc_fsubd(c,D,A,B) ppc_fsubx(c,D,A,B,1)
+
+#define ppc_fsubsx(c,D,A,B,Rc) ppc_emit32(c, (59 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 6) | (20 << 1) | Rc)
+#define ppc_fsubs(c,D,A,B) ppc_fsubsx(c,D,A,B,0)
+#define ppc_fsubsd(c,D,A,B) ppc_fsubsx(c,D,A,B,1)
+
+#define ppc_icbi(c,A,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (A << 16) | (B << 11) | (982 << 1) | 0)
+
+#define ppc_isync(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (150 << 1) | 0)
+
+#define ppc_lbzu(c,D,d,A) ppc_emit32(c, (35 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lbzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (119 << 1) | 0)
+#define ppc_lbzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (87 << 1) | 0)
+
+#define ppc_lfdu(c,D,d,A) ppc_emit32(c, (51 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfdux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (631 << 1) | 0)
+#define ppc_lfdx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (599 << 1) | 0)
+
+#define ppc_lfsu(c,D,d,A) ppc_emit32(c, (49 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lfsux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (567 << 1) | 0)
+#define ppc_lfsx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (535 << 1) | 0)
+
+#define ppc_lha(c,D,d,A) ppc_emit32(c, (42 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhau(c,D,d,A) ppc_emit32(c, (43 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lhaux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (375 << 1) | 0)
+#define ppc_lhax(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (343 << 1) | 0)
+#define ppc_lhbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (790 << 1) | 0)
+#define ppc_lhzu(c,D,d,A) ppc_emit32(c, (41 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lhzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (311 << 1) | 0)
+#define ppc_lhzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (279 << 1) | 0)
+
+#define ppc_lmw(c,D,d,A) ppc_emit32(c, (46 << 26) | (D << 21) | (A << 16) | (guint16)d)
+
+#define ppc_lswi(c,D,A,NB) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (NB << 11) | (597 << 1) | 0)
+#define ppc_lswx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (533 << 1) | 0)
+#define ppc_lwarx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (20 << 1) | 0)
+#define ppc_lwbrx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (534 << 1) | 0)
+
+#define ppc_lwzu(c,D,d,A) ppc_emit32(c, (33 << 26) | (D << 21) | (A << 16) | (guint16)d)
+#define ppc_lwzux(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (55 << 1) | 0)
+#define ppc_lwzx(c,D,A,B) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (23 << 1) | 0)
+
+#define ppc_mcrf(c,crfD,crfS) ppc_emit32(c, (19 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | 0)
+#define ppc_mcrfs(c,crfD,crfS) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 21) | (crfS << 18) | (0 << 16) | (64 << 1) | 0)
+#define ppc_mcrxr(c,crfD) ppc_emit32(c, (31 << 26) | (crfD << 23) | (0 << 16) | (512 << 1) | 0)
+
+#define ppc_mfcr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (19 << 1) | 0)
+#define ppc_mffsx(c,D,Rc) ppc_emit32(c, (63 << 26) | (D << 21) | (0 << 16) | (583 << 1) | Rc)
+#define ppc_mffs(c,D) ppc_mffsx(c,D,0)
+#define ppc_mffsd(c,D) ppc_mffsx(c,D,1)
+#define ppc_mfmsr(c,D) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (83 << 1) | 0)
+#define ppc_mfsr(c,D,SR) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (595 << 1) | 0)
+#define ppc_mfsrin(c,D,B) ppc_emit32(c, (31 << 26) | (D << 21) | (0 << 16) | (B << 11) | (659 << 1) | 0)
+#define ppc_mftb(c,D,TBR) ppc_emit32(c, (31 << 26) | (D << 21) | (TBR << 11) | (371 << 1) | 0)
+
+#define ppc_mtcrf(c,CRM,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (CRM << 12) | (0 << 11) | (144 << 1) | 0)
+
+#define ppc_mtfsb0x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (70 << 1) | Rc)
+#define ppc_mtfsb0(c,CRB) ppc_mtfsb0x(c,CRB,0)
+#define ppc_mtfsb0d(c,CRB) ppc_mtfsb0x(c,CRB,1)
+
+#define ppc_mtfsb1x(c,CRB,Rc) ppc_emit32(c, (63 << 26) | (CRB << 21) | (0 << 11) | (38 << 1) | Rc)
+#define ppc_mtfsb1(c,CRB) ppc_mtfsb1x(c,CRB,0)
+#define ppc_mtfsb1d(c,CRB) ppc_mtfsb1x(c,CRB,1)
+
+#define ppc_mtfsfx(c,FM,B,Rc) ppc_emit32(c, (63 << 26) | (0 << 25) | (FM << 22) | (0 << 21) | (B << 11) | (711 << 1) | Rc)
+#define ppc_mtfsf(c,FM,B) ppc_mtfsfx(c,FM,B,0)
+#define ppc_mtfsfd(c,FM,B) ppc_mtfsfx(c,FM,B,1)
+
+#define ppc_mtfsfix(c,crfD,IMM,Rc) ppc_emit32(c, (63 << 26) | (crfD << 23) | (0 << 16) | (IMM << 12) | (0 << 11) | (134 << 1) | Rc)
+#define ppc_mtfsfi(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,0)
+#define ppc_mtfsfid(c,crfD,IMM) ppc_mtfsfix(c,crfD,IMM,1)
+
+#define ppc_mtmsr(c, S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 11) | (146 << 1) | 0)
+
+#define ppc_mtsr(c,SR,S) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 20) | (SR << 16) | (0 << 11) | (210 << 1) | 0)
+#define ppc_mtsrin(c,S,B) ppc_emit32(c, (31 << 26) | (S << 21) | (0 << 16) | (B << 11) | (242 << 1) | 0)
+
+#define ppc_mulhwx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (75 << 1) | Rc)
+#define ppc_mulhw(c,D,A,B) ppc_mulhwx(c,D,A,B,0)
+#define ppc_mulhwd(c,D,A,B) ppc_mulhwx(c,D,A,B,1)
+
+#define ppc_mulhwux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (0 << 10) | (11 << 1) | Rc)
+#define ppc_mulhwu(c,D,A,B) ppc_mulhwux(c,D,A,B,0)
+#define ppc_mulhwud(c,D,A,B) ppc_mulhwux(c,D,A,B,1)
+
+#define ppc_mulli(c,D,A,SIMM) ppc_emit32(c, ((07) << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_mullwx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (235 << 1) | Rc)
+#define ppc_mullw(c,D,A,B) ppc_mullwx(c,D,A,B,0,0)
+#define ppc_mullwd(c,D,A,B) ppc_mullwx(c,D,A,B,0,1)
+#define ppc_mullwo(c,D,A,B) ppc_mullwx(c,D,A,B,1,0)
+#define ppc_mullwod(c,D,A,B) ppc_mullwx(c,D,A,B,1,1)
+
+#define ppc_nandx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (476 << 1) | Rc)
+#define ppc_nand(c,A,S,B) ppc_nandx(c,A,S,B,0)
+#define ppc_nandd(c,A,S,B) ppc_nandx(c,A,S,B,1)
+
+#define ppc_negx(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (104 << 1) | Rc)
+#define ppc_neg(c,D,A) ppc_negx(c,D,A,0,0)
+#define ppc_negd(c,D,A) ppc_negx(c,D,A,0,1)
+#define ppc_nego(c,D,A) ppc_negx(c,D,A,1,0)
+#define ppc_negod(c,D,A) ppc_negx(c,D,A,1,1)
+
+#define ppc_norx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (124 << 1) | Rc)
+#define ppc_nor(c,A,S,B) ppc_norx(c,A,S,B,0)
+#define ppc_nord(c,A,S,B) ppc_norx(c,A,S,B,1)
+
+#define ppc_not(c,A,S) ppc_norx(c,A,S,S,0)
+
+#define ppc_orx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (444 << 1) | Rc)
+#define ppc_ord(c,A,S,B) ppc_orx(c,A,S,B,1)
+
+#define ppc_orcx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (412 << 1) | Rc)
+#define ppc_orc(c,A,S,B) ppc_orcx(c,A,S,B,0)
+#define ppc_orcd(c,A,S,B) ppc_orcx(c,A,S,B,1)
+
+#define ppc_oris(c,A,S,UIMM) ppc_emit32(c, (25 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+#define ppc_rfi(c) ppc_emit32(c, (19 << 26) | (0 << 11) | (50 << 1) | 0)
+
+#define ppc_rlwimix(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (20 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwimi(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,0)
+#define ppc_rlwimid(c,A,S,SH,MB,ME) ppc_rlwimix(c,A,S,SH,MB,ME,1)
+
+#define ppc_rlwinmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (21 << 26) | ((S) << 21) | ((A) << 16) | ((SH) << 11) | ((MB) << 6) | ((ME) << 1) | (Rc))
+#define ppc_rlwinm(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwinmd(c,A,S,SH,MB,ME) ppc_rlwinmx(c,A,S,SH,MB,ME,1)
+#define ppc_extlwi(c,A,S,n,b) ppc_rlwinm(c,A,S, b, 0, (n) - 1)
+#define ppc_extrwi(c,A,S,n,b) ppc_rlwinm(c,A,S, (b) + (n), 32 - (n), 31)
+#define ppc_rotlwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31)
+#define ppc_rotrwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), 0, 31)
+#define ppc_slwi(c,A,S,n) ppc_rlwinm(c,A,S, n, 0, 31 - (n))
+#define ppc_srwi(c,A,S,n) ppc_rlwinm(c,A,S, 32 - (n), n, 31)
+#define ppc_clrlwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, n, 31)
+#define ppc_clrrwi(c,A,S,n) ppc_rlwinm(c,A,S, 0, 0, 31 - (n))
+#define ppc_clrlslwi(c,A,S,b,n) ppc_rlwinm(c,A,S, n, (b) - (n), 31 - (n))
+
+#define ppc_rlwnmx(c,A,S,SH,MB,ME,Rc) ppc_emit32(c, (23 << 26) | (S << 21) | (A << 16) | (SH << 11) | (MB << 6) | (ME << 1) | Rc)
+#define ppc_rlwnm(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,0)
+#define ppc_rlwnmd(c,A,S,SH,MB,ME) ppc_rlwnmx(c,A,S,SH,MB,ME,1)
+
+#define ppc_sc(c) ppc_emit32(c, (17 << 26) | (0 << 2) | (1 << 1) | 0)
+
+#define ppc_slwx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (24 << 1) | Rc)
+#define ppc_slw(c,S,A,B) ppc_slwx(c,S,A,B,0)
+#define ppc_slwd(c,S,A,B) ppc_slwx(c,S,A,B,1)
+
+#define ppc_srawx(c,A,S,B,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (792 << 1) | Rc)
+#define ppc_sraw(c,A,S,B) ppc_srawx(c,A,S,B,0)
+#define ppc_srawd(c,A,S,B) ppc_srawx(c,A,S,B,1)
+
+#define ppc_srawix(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (824 << 1) | Rc)
+#define ppc_srawi(c,A,S,B) ppc_srawix(c,A,S,B,0)
+#define ppc_srawid(c,A,S,B) ppc_srawix(c,A,S,B,1)
+
+#define ppc_srwx(c,A,S,SH,Rc) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (SH << 11) | (536 << 1) | Rc)
+#define ppc_srw(c,A,S,B) ppc_srwx(c,A,S,B,0)
+#define ppc_srwd(c,A,S,B) ppc_srwx(c,A,S,B,1)
+
+#define ppc_stbu(c,S,d,A) ppc_emit32(c, (39 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stbux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (247 << 1) | 0)
+#define ppc_stbx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (215 << 1) | 0)
+
+#define ppc_stfdu(c,S,d,A) ppc_emit32(c, (55 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+
+#define ppc_stfdx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (727 << 1) | 0)
+#define ppc_stfiwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (983 << 1) | 0)
+
+#define ppc_stfsu(c,S,d,A) ppc_emit32(c, (53 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_stfsux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (695 << 1) | 0)
+#define ppc_stfsx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (663 << 1) | 0)
+#define ppc_sthbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (918 << 1) | 0)
+#define ppc_sthu(c,S,d,A) ppc_emit32(c, (45 << 26) | (S << 21) | (A << 16) | (guint16)(d))
+#define ppc_sthux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (439 << 1) | 0)
+#define ppc_sthx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (407 << 1) | 0)
+#define ppc_stmw(c,S,d,A) ppc_emit32(c, (47 << 26) | (S << 21) | (A << 16) | (guint16)d)
+#define ppc_stswi(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (725 << 1) | 0)
+#define ppc_stswx(c,S,A,NB) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (NB << 11) | (661 << 1) | 0)
+#define ppc_stwbrx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (662 << 1) | 0)
+#define ppc_stwcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (150 << 1) | 1)
+#define ppc_stwux(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (183 << 1) | 0)
+#define ppc_stwx(c,S,A,B) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (151 << 1) | 0)
+
+#define ppc_subfx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (40 << 1) | Rc)
+#define ppc_subf(c,D,A,B) ppc_subfx(c,D,A,B,0,0)
+#define ppc_subfd(c,D,A,B) ppc_subfx(c,D,A,B,0,1)
+#define ppc_subfo(c,D,A,B) ppc_subfx(c,D,A,B,1,0)
+#define ppc_subfod(c,D,A,B) ppc_subfx(c,D,A,B,1,1)
+
+#define ppc_sub(c,D,A,B) ppc_subf(c,D,B,A)
+
+#define ppc_subfcx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (8 << 1) | Rc)
+#define ppc_subfc(c,D,A,B) ppc_subfcx(c,D,A,B,0,0)
+#define ppc_subfcd(c,D,A,B) ppc_subfcx(c,D,A,B,0,1)
+#define ppc_subfco(c,D,A,B) ppc_subfcx(c,D,A,B,1,0)
+#define ppc_subfcod(c,D,A,B) ppc_subfcx(c,D,A,B,1,1)
+
+#define ppc_subfex(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (B << 11) | (OE << 10) | (136 << 1) | Rc)
+#define ppc_subfe(c,D,A,B) ppc_subfex(c,D,A,B,0,0)
+#define ppc_subfed(c,D,A,B) ppc_subfex(c,D,A,B,0,1)
+#define ppc_subfeo(c,D,A,B) ppc_subfex(c,D,A,B,1,0)
+#define ppc_subfeod(c,D,A,B) ppc_subfex(c,D,A,B,1,1)
+
+#define ppc_subfic(c,D,A,SIMM) ppc_emit32(c, (8 << 26) | (D << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_subfmex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (232 << 1) | Rc)
+#define ppc_subfme(c,D,A) ppc_subfmex(c,D,A,0,0)
+#define ppc_subfmed(c,D,A) ppc_subfmex(c,D,A,0,1)
+#define ppc_subfmeo(c,D,A) ppc_subfmex(c,D,A,1,0)
+#define ppc_subfmeod(c,D,A) ppc_subfmex(c,D,A,1,1)
+
+#define ppc_subfzex(c,D,A,OE,Rc) ppc_emit32(c, (31 << 26) | (D << 21) | (A << 16) | (0 << 11) | (OE << 10) | (200 << 1) | Rc)
+#define ppc_subfze(c,D,A) ppc_subfzex(c,D,A,0,0)
+#define ppc_subfzed(c,D,A) ppc_subfzex(c,D,A,0,1)
+#define ppc_subfzeo(c,D,A) ppc_subfzex(c,D,A,1,0)
+#define ppc_subfzeod(c,D,A) ppc_subfzex(c,D,A,1,1)
+
+#define ppc_sync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (598 << 1) | 0)
+#define ppc_tlbia(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (370 << 1) | 0)
+#define ppc_tlbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 16) | (B << 11) | (306 << 1) | 0)
+#define ppc_tlbsync(c) ppc_emit32(c, (31 << 26) | (0 << 11) | (566 << 1) | 0)
+
+#define ppc_tw(c,TO,A,B) ppc_emit32(c, (31 << 26) | (TO << 21) | (A << 16) | (B << 11) | (4 << 1) | 0)
+#define ppc_twi(c,TO,A,SIMM) ppc_emit32(c, (3 << 26) | (TO << 21) | (A << 16) | (guint16)(SIMM))
+
+#define ppc_xorx(c,A,S,B,RC) ppc_emit32(c, (31 << 26) | (S << 21) | (A << 16) | (B << 11) | (316 << 1) | RC)
+#define ppc_xor(c,A,S,B) ppc_xorx(c,A,S,B,0)
+#define ppc_xord(c,A,S,B) ppc_xorx(c,A,S,B,1)
+
+#define ppc_xori(c,S,A,UIMM) ppc_emit32(c, (26 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+#define ppc_xoris(c,S,A,UIMM) ppc_emit32(c, (27 << 26) | (S << 21) | (A << 16) | (guint16)(UIMM))
+
+/* this marks the end of my work, ct */
+
+/* PPC64 */
+
+/* The following FP instructions are not available to 32-bit
+   implementations (prior to PowerISA-V2.01) but are available to
+   32-bit mode programs on 64-bit PowerPC implementations and all
+   processors compliant with PowerISA-2.01 or later. */
+
+#define ppc_fcfidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (846 << 1) | (Rc))
+#define ppc_fcfid(c,D,B) ppc_fcfidx(c,D,B,0)
+#define ppc_fcfidd(c,D,B) ppc_fcfidx(c,D,B,1)
+
+#define ppc_fctidx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (814 << 1) | (Rc))
+#define ppc_fctid(c,D,B) ppc_fctidx(c,D,B,0)
+#define ppc_fctidd(c,D,B) ppc_fctidx(c,D,B,1)
+
+#define ppc_fctidzx(c,D,B,Rc) ppc_emit32(c, (63 << 26) | ((D) << 21) | (0 << 16) | ((B) << 11) | (815 << 1) | (Rc))
+#define ppc_fctidz(c,D,B) ppc_fctidzx(c,D,B,0)
+#define ppc_fctidzd(c,D,B) ppc_fctidzx(c,D,B,1)
+
+#ifdef __mono_ppc64__
+
+#define ppc_load_sequence(c,D,v) G_STMT_START { \
+ ppc_lis ((c), (D), ((guint64)(v) >> 48) & 0xffff); \
+ ppc_ori ((c), (D), (D), ((guint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define PPC_LOAD_SEQUENCE_LENGTH 20
+
+#define ppc_is_imm32(val) (((((gint64)val)>> 31) == 0) || ((((gint64)val)>> 31) == -1))
+#define ppc_is_imm48(val) (((((gint64)val)>> 47) == 0) || ((((gint64)val)>> 47) == -1))
+
+#define ppc_load48(c,D,v) G_STMT_START { \
+ ppc_li ((c), (D), ((gint64)(v) >> 32) & 0xffff); \
+ ppc_sldi ((c), (D), (D), 32); \
+ ppc_oris ((c), (D), (D), ((guint64)(v) >> 16) & 0xffff); \
+ ppc_ori ((c), (D), (D), (guint64)(v) & 0xffff); \
+ } G_STMT_END
+
+#define ppc_load(c,D,v) G_STMT_START { \
+ if (ppc_is_imm16 ((guint64)(v))) { \
+ ppc_li ((c), (D), (guint16)(guint64)(v)); \
+ } else if (ppc_is_imm32 ((guint64)(v))) { \
+ ppc_load32 ((c), (D), (guint32)(guint64)(v)); \
+ } else if (ppc_is_imm48 ((guint64)(v))) { \
+ ppc_load48 ((c), (D), (guint64)(v)); \
+ } else { \
+ ppc_load_sequence ((c), (D), (guint64)(v)); \
+ } \
+ } G_STMT_END
+
+#define ppc_load_func(c,D,v) G_STMT_START { \
+ ppc_load_sequence ((c), ppc_r11, (guint64)(gsize)(v)); \
+ ppc_ldptr ((c), ppc_r2, sizeof (gpointer), ppc_r11); \
+ ppc_ldptr ((c), (D), 0, ppc_r11); \
+ } G_STMT_END
+
+#define ppc_load_multiple_regs(c,D,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (D); __i <= 31; ++__i) { \
+ ppc_ldr ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_store_multiple_regs(c,S,d,A) G_STMT_START { \
+ int __i, __o = (d); \
+ for (__i = (S); __i <= 31; ++__i) { \
+ ppc_str ((c), __i, __o, (A)); \
+ __o += sizeof (guint64); \
+ } \
+ } G_STMT_END
+
+#define ppc_compare(c,cfrD,A,B) ppc_cmp((c), (cfrD), 1, (A), (B))
+#define ppc_compare_reg_imm(c,cfrD,A,B) ppc_cmpi((c), (cfrD), 1, (A), (B))
+#define ppc_compare_log(c,cfrD,A,B) ppc_cmpl((c), (cfrD), 1, (A), (B))
+
+#define ppc_shift_left(c,A,S,B) ppc_sld((c), (A), (S), (B))
+#define ppc_shift_left_imm(c,A,S,n) ppc_sldi((c), (A), (S), (n))
+
+#define ppc_shift_right_imm(c,A,S,B) ppc_srdi((c), (A), (S), (B))
+#define ppc_shift_right_arith_imm(c,A,S,B) ppc_sradi((c), (A), (S), (B))
+
+#define ppc_multiply(c,D,A,B) ppc_mulld((c), (D), (A), (B))
+
+#define ppc_clear_right_imm(c,A,S,n) ppc_clrrdi((c), (A), (S), (n))
+
+#define ppc_divdx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (489 << 1) | (Rc))
+#define ppc_divd(c,D,A,B) ppc_divdx(c,D,A,B,0,0)
+#define ppc_divdd(c,D,A,B) ppc_divdx(c,D,A,B,0,1)
+#define ppc_divdo(c,D,A,B) ppc_divdx(c,D,A,B,1,0)
+#define ppc_divdod(c,D,A,B) ppc_divdx(c,D,A,B,1,1)
+
+#define ppc_divdux(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (457 << 1) | (Rc))
+#define ppc_divdu(c,D,A,B) ppc_divdux(c,D,A,B,0,0)
+#define ppc_divdud(c,D,A,B) ppc_divdux(c,D,A,B,0,1)
+#define ppc_divduo(c,D,A,B) ppc_divdux(c,D,A,B,1,0)
+#define ppc_divduod(c,D,A,B) ppc_divdux(c,D,A,B,1,1)
+
+#define ppc_extswx(c,S,A,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (0 << 11) | (986 << 1) | (Rc))
+#define ppc_extsw(c,A,S) ppc_extswx(c,S,A,0)
+#define ppc_extswd(c,A,S) ppc_extswx(c,S,A,1)
+
+/* These move float to/from instructions are only available on POWER6 in
+   native mode. These instructions are faster than the equivalent
+   store/load because they avoid the store queue and associated delays.
+ These instructions should only be used in 64-bit mode unless the
+ kernel preserves the 64-bit GPR on signals and dispatch in 32-bit
+ mode. The Linux kernel does not. */
+#define ppc_mftgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (735 << 1) | 0)
+#define ppc_mffgpr(c,T,B) ppc_emit32(c, (31 << 26) | ((T) << 21) | (0 << 16) | ((B) << 11) | (607 << 1) | 0)
+
+#define ppc_ld(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_lwa(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((ds) & 0xfffc) | 2)
+#define ppc_ldarx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (84 << 1) | 0)
+#define ppc_ldu(c,D,ds,A) ppc_emit32(c, (58 << 26) | ((D) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_ldux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (53 << 1) | 0)
+#define ppc_lwaux(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (373 << 1) | 0)
+#define ppc_ldx(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (21 << 1) | 0)
+#define ppc_lwax(c,D,A,B) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (341 << 1) | 0)
+
+#define ppc_mulhdx(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (73 << 1) | (Rc))
+#define ppc_mulhd(c,D,A,B) ppc_mulhdx(c,D,A,B,0)
+#define ppc_mulhdd(c,D,A,B) ppc_mulhdx(c,D,A,B,1)
+#define ppc_mulhdux(c,D,A,B,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | (0 << 10) | (9 << 1) | (Rc))
+#define ppc_mulhdu(c,D,A,B) ppc_mulhdux(c,D,A,B,0)
+#define ppc_mulhdud(c,D,A,B) ppc_mulhdux(c,D,A,B,1)
+
+#define ppc_mulldx(c,D,A,B,OE,Rc) ppc_emit32(c, (31 << 26) | ((D) << 21) | ((A) << 16) | ((B) << 11) | ((OE) << 10) | (233 << 1) | (Rc))
+#define ppc_mulld(c,D,A,B) ppc_mulldx(c,D,A,B,0,0)
+#define ppc_mulldd(c,D,A,B) ppc_mulldx(c,D,A,B,0,1)
+#define ppc_mulldo(c,D,A,B) ppc_mulldx(c,D,A,B,1,0)
+#define ppc_mulldod(c,D,A,B) ppc_mulldx(c,D,A,B,1,1)
+
+#define ppc_rldclx(c,A,S,B,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(MB) << 5) | (8 << 1) | (Rc))
+#define ppc_rldcl(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,0)
+#define ppc_rldcld(c,A,S,B,MB) ppc_rldclx(c,A,S,B,MB,1)
+#define ppc_rotld(c,A,S,B) ppc_rldcl(c, A, S, B, 0)
+
+#define ppc_rldcrx(c,A,S,B,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (ppc_split_5_1(ME) << 5) | (9 << 1) | (Rc))
+#define ppc_rldcr(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,0)
+#define ppc_rldcrd(c,A,S,B,ME) ppc_rldcrx(c,A,S,B,ME,1)
+
+#define ppc_rldicx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (2 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldic(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,0)
+#define ppc_rldicd(c,A,S,SH,MB) ppc_rldicx(c,S,A,SH,MB,1)
+
+#define ppc_rldiclx(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (0 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicl(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,0)
+#define ppc_rldicld(c,A,S,SH,MB) ppc_rldiclx(c,S,A,SH,MB,1)
+#define ppc_extrdi(c,A,S,n,b) ppc_rldicl(c,A,S, (b) + (n), 64 - (n))
+#define ppc_rotldi(c,A,S,n) ppc_rldicl(c,A,S, n, 0)
+#define ppc_rotrdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), 0)
+#define ppc_srdi(c,A,S,n) ppc_rldicl(c,A,S, 64 - (n), n)
+#define ppc_clrldi(c,A,S,n) ppc_rldicl(c,A,S, 0, n)
+
+#define ppc_rldicrx(c,A,S,SH,ME,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(ME) << 5) | (1 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldicr(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,0)
+#define ppc_rldicrd(c,A,S,SH,ME) ppc_rldicrx(c,A,S,SH,ME,1)
+#define ppc_extldi(c,A,S,n,b) ppc_rldicr(c, A, S, b, (n) - 1)
+#define ppc_sldi(c,A,S,n) ppc_rldicr(c, A, S, n, 63 - (n))
+#define ppc_clrrdi(c,A,S,n) ppc_rldicr(c, A, S, 0, 63 - (n))
+
+#define ppc_rldimix(c,S,A,SH,MB,Rc) ppc_emit32(c, (30 << 26) | ((S) << 21) | ((A) << 16) | (ppc_split_5_1_5(SH) << 11) | (ppc_split_5_1(MB) << 5) | (3 << 2) | (ppc_split_5_1_1(SH) << 1) | (Rc))
+#define ppc_rldimi(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,0)
+#define ppc_rldimid(c,A,S,SH,MB) ppc_rldimix(c,S,A,SH,MB,1)
+
+#define ppc_slbia(c) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | (0 << 11) | (498 << 1) | 0)
+#define ppc_slbie(c,B) ppc_emit32(c, (31 << 26) | (0 << 21) | (0 << 16) | ((B) << 11) | (434 << 1) | 0)
+#define ppc_sldx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (27 << 1) | (Rc))
+#define ppc_sld(c,A,S,B) ppc_sldx(c,S,A,B,0)
+#define ppc_sldd(c,A,S,B) ppc_sldx(c,S,A,B,1)
+
+#define ppc_sradx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (794 << 1) | (Rc))
+#define ppc_srad(c,A,S,B) ppc_sradx(c,S,A,B,0)
+#define ppc_sradd(c,A,S,B) ppc_sradx(c,S,A,B,1)
+#define ppc_sradix(c,S,A,SH,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | (((SH) & 31) << 11) | (413 << 2) | (((SH) >> 5) << 1) | (Rc))
+#define ppc_sradi(c,A,S,SH) ppc_sradix(c,S,A,SH,0)
+#define ppc_sradid(c,A,S,SH) ppc_sradix(c,S,A,SH,1)
+
+#define ppc_srdx(c,S,A,B,Rc) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (539 << 1) | (Rc))
+#define ppc_srd(c,A,S,B) ppc_srdx(c,S,A,B,0)
+#define ppc_srdd(c,A,S,B) ppc_srdx(c,S,A,B,1)
+
+#define ppc_std(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 0)
+#define ppc_stdcxd(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (214 << 1) | 1)
+#define ppc_stdu(c,S,ds,A) ppc_emit32(c, (62 << 26) | ((S) << 21) | ((A) << 16) | ((guint32)(ds) & 0xfffc) | 1)
+#define ppc_stdux(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (181 << 1) | 0)
+#define ppc_stdx(c,S,A,B) ppc_emit32(c, (31 << 26) | ((S) << 21) | ((A) << 16) | ((B) << 11) | (149 << 1) | 0)
+
+#else
+/* Always true for 32-bit */
+#define ppc_is_imm32(val) (1)
+#endif
+
+#endif
diff --git a/src/arch/s390x/.gitignore b/src/arch/s390x/.gitignore
new file mode 100644
index 0000000..341daec
--- /dev/null
+++ b/src/arch/s390x/.gitignore
@@ -0,0 +1,6 @@
+/Makefile
+/Makefile.in
+/.libs
+/.deps
+/*.la
+/*.lo
diff --git a/src/arch/s390x/ChangeLog b/src/arch/s390x/ChangeLog
new file mode 100644
index 0000000..e756d35
--- /dev/null
+++ b/src/arch/s390x/ChangeLog
@@ -0,0 +1,35 @@
+2010-03-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Remove duplicate
+
+2009-06-24 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2007-04-12 Neale Ferguson <neale@sinenomine.net>
+
+ * tramp.c: Add MONO_TYPE_PTR case.
+
+2007-01-23 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add packed attribute to several instruction structures.
+
+2006-03-13 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Fix immediate checks.
+
+2006-01-06 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add lpdbr instruction (OP_ABS).
+
+2006-01-03 Neale Ferguson <neale@sinenomine.net>
+
+ * s390x-codegen.h: Add some new instructions.
+
+2004-12-15 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h: Add some new instructions (CS, CSG, CSY, CDS, CDSG, CDSY)
+
+2004-08-03 Neale Ferguson <Neale.Ferguson@SoftwareAG-usa.com>
+
+ * s390x-codegen.h Makefile.am tramp.c: S/390 64-bit interpreter
diff --git a/src/arch/s390x/Makefile.am b/src/arch/s390x/Makefile.am
new file mode 100644
index 0000000..ce7f470
--- /dev/null
+++ b/src/arch/s390x/Makefile.am
@@ -0,0 +1,7 @@
+
+AM_CPPFLAGS = $(GLIB_CFLAGS) -I$(top_srcdir)
+
+noinst_LTLIBRARIES = libmonoarch-s390x.la
+
+libmonoarch_s390x_la_SOURCES = tramp.c s390x-codegen.h
+
diff --git a/src/arch/s390x/s390x-codegen.h b/src/arch/s390x/s390x-codegen.h
new file mode 100644
index 0000000..47e6564
--- /dev/null
+++ b/src/arch/s390x/s390x-codegen.h
@@ -0,0 +1,997 @@
+/*
+ Copyright (C) 2001 Radek Doulik
+*/
+
+#ifndef S390X_H
+#define S390X_H
+#include <glib.h>
+#include <assert.h>
+#include <limits.h>
+
+#define FLOAT_REGS 2 /* No. float registers for parms */
+#define GENERAL_REGS 5 /* No. general registers for parms */
+
+#define ARG_BASE s390_r10 /* Register for addressing arguments*/
+#define STKARG \
+ (i*(sizeof(stackval))) /* Displacement of ith argument */
+
+#define MINV_POS 160 /* MonoInvocation stack offset */
+#define STACK_POS (MINV_POS - sizeof (stackval) * sig->param_count)
+#define OBJ_POS 8
+#define TYPE_OFFSET (G_STRUCT_OFFSET (stackval, type))
+
+#define MIN_CACHE_LINE 256
+
+/*------------------------------------------------------------------*/
+/* Sequence to add an int/long long to parameters to stack_from_data*/
+/*------------------------------------------------------------------*/
+#define ADD_ISTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a float/double to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_RSTACK_PARM(i) \
+ if (fpr_param < FLOAT_REGS) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ float_pos + (fpr_param * sizeof(float) * (i))); \
+ fpr_param++; \
+ } else { \
+ stack_param += (stack_param % (i)); \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(float) * (i)); \
+ stack_param += (i); \
+ }
+
+/*------------------------------------------------------------------*/
+/* Sequence to add a structure ptr to parameters to stack_from_data */
+/*------------------------------------------------------------------*/
+#define ADD_TSTACK_PARM \
+ if (reg_param < GENERAL_REGS) { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param++; \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
+
+#define ADD_PSTACK_PARM(r, i) \
+ if (reg_param < GENERAL_REGS-(r)) { \
+ s390_lay (p, s390_r4, 0, STK_BASE, \
+ local_start + (reg_param - this_flag) * sizeof(long)); \
+ reg_param += (i); \
+ } else { \
+ s390_ly (p, s390_r4, 0, STK_BASE, \
+ sz.stack_size + MINV_POS + stack_param * sizeof(long)); \
+ stack_param++; \
+ }
+
+typedef enum {
+ s390_r0 = 0,
+ s390_r1,
+ s390_r2,
+ s390_r3,
+ s390_r4,
+ s390_r5,
+ s390_r6,
+ s390_r7,
+ s390_r8,
+ s390_r9,
+ s390_r10,
+ s390_r11,
+ s390_r12,
+ s390_r13,
+ s390_r14,
+ s390_r15,
+} S390IntRegister;
+
+typedef enum {
+ s390_f0 = 0,
+ s390_f1,
+ s390_f2,
+ s390_f3,
+ s390_f4,
+ s390_f5,
+ s390_f6,
+ s390_f7,
+ s390_f8,
+ s390_f9,
+ s390_f10,
+ s390_f11,
+ s390_f12,
+ s390_f13,
+ s390_f14,
+ s390_f15,
+} S390FloatRegister;
+
+typedef enum {
+ s390_a0 = 0,
+ s390_a1,
+ s390_a2,
+ s390_a3,
+ s390_a4,
+ s390_a5,
+ s390_a6,
+ s390_a7,
+ s390_a8,
+ s390_a9,
+ s390_a10,
+ s390_a11,
+ s390_a12,
+ s390_a13,
+ s390_a14,
+ s390_a15,
+} S390AccRegister;
+
+typedef enum {
+ s390_fpc = 256,
+} S390SpecialRegister;
+
+#define s390_is_imm16(val) ((glong)val >= (glong) SHRT_MIN && \
+ (glong)val <= (glong) SHRT_MAX)
+#define s390_is_imm32(val) ((glong)val >= (glong) INT_MIN && \
+ (glong)val <= (glong) INT_MAX)
+#define s390_is_uimm16(val) ((glong)val >= 0 && (glong)val <= (glong) USHRT_MAX)
+#define s390_is_uimm32(val) ((glong)val >= 0 && (glong)val <= (glong) UINT_MAX)
+#define s390_is_uimm20(val) ((glong)val >= 0 && (glong)val <= 1048575)
+#define s390_is_imm20(val) ((glong)val >= -524288 && (glong)val <= 524287)
+#define s390_is_imm12(val) ((glong)val >= (glong)-4096 && \
+ (glong)val <= (glong)4095)
+#define s390_is_uimm12(val) ((glong)val >= 0 && (glong)val <= 4095)
+
+#define STK_BASE s390_r15
+#define S390_SP s390_r15
+#define S390_FP s390_r11
+#define S390_MINIMAL_STACK_SIZE 160
+#define S390_REG_SAVE_OFFSET 48
+#define S390_PARM_SAVE_OFFSET 16
+#define S390_RET_ADDR_OFFSET 112
+#define S390_FLOAT_SAVE_OFFSET 128
+
+#define S390_CC_ZR 8
+#define S390_CC_NE 7
+#define S390_CC_NZ 7
+#define S390_CC_LT 4
+#define S390_CC_GT 2
+#define S390_CC_GE 11
+#define S390_CC_NM 11
+#define S390_CC_LE 13
+#define S390_CC_OV 1
+#define S390_CC_NO 14
+#define S390_CC_CY 3
+#define S390_CC_NC 12
+#define S390_CC_UN 15
+
+#define s390_word(addr, value) do \
+{ \
+ * (guint32 *) addr = (guint32) value; \
+ addr += sizeof(guint32); \
+} while (0)
+
+#define s390_float(addr, value) do \
+{ \
+ * (gfloat *) addr = (gfloat) value; \
+ addr += sizeof(gfloat); \
+} while (0)
+
+#define s390_llong(addr, value) do \
+{ \
+ * (guint64 *) addr = (guint64) value; \
+ addr += sizeof(guint64); \
+} while (0)
+
+#define s390_double(addr, value) do \
+{ \
+ * (gdouble *) addr = (gdouble) value; \
+ addr += sizeof(gdouble); \
+} while (0)
+
+typedef struct {
+ short op;
+} E_Format;
+
+typedef struct {
+ char op;
+ int im;
+} I_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r2 : 4;
+} RR_Format;
+
+typedef struct {
+ short op;
+ char xx;
+ char r1 : 4;
+ char r2 : 4;
+} RRE_Format;
+
+typedef struct {
+ short op;
+ char r1 : 4;
+ char xx : 4;
+ char r3 : 4;
+ char r2 : 4;
+} RRF_Format_1;
+
+typedef struct {
+ short op;
+ char m3 : 4;
+ char xx : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_2;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char m4 : 4;
+ char r1 : 4;
+ char r2 : 4;
+} RRF_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ short d2 : 12;
+} RX_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char xx;
+ char op2;
+} RXE_Format;
+
+typedef struct {
+ char op1;
+ char r3 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 12;
+ char r1 : 4;
+ char xx : 4;
+ char op2;
+} RXF_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char x2 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RXY_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_1;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char xx : 4;
+ char b2 : 4;
+ int d2 : 12;
+} RS_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char m3 : 4;
+ char b2 : 4;
+ int d2 : 20;
+ char op2;
+} __attribute__ ((packed)) RSY_Format_2;
+
+typedef struct {
+ char op1;
+ char l1 : 4;
+ char xx : 4;
+ char b1 : 4;
+ int d1 : 12;
+ char yy;
+ char op2;
+} RSL_Format;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+} RSI_Format;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ short i2;
+} RI_Format;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char xx;
+ char op2;
+} RIE_Format_1;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short i2;
+ char m2 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_2;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char r3 : 4;
+ short d;
+ char i;
+ char op2;
+} RIE_Format_3;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char yy : 4;
+ short i2;
+ char m3 : 4;
+ char xx : 4;
+ char op2;
+} RIE_Format_4;
+
+typedef struct {
+ char op1;
+ char r1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_1;
+
+typedef struct {
+ char op1;
+ char m1 : 4;
+ char op2 : 4;
+ int i2;
+} __attribute__ ((packed)) RIL_Format_2;
+
+typedef struct {
+ char op;
+ char i2;
+ char b1 : 4;
+ short d1 : 12;
+} SI_Format;
+
+typedef struct {
+ char op1;
+ char i2;
+ char b1 : 4;
+ int d1 : 20;
+ char op2;
+} __attribute__ ((packed)) SIY_Format;
+
+typedef struct {
+ short op;
+ char b2 : 4;
+ short d2 : 12;
+} S_Format;
+
+typedef struct {
+ char op;
+ char ll;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_1;
+
+typedef struct {
+ char op;
+ char l1 : 4;
+ char l2 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_2;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b1 : 4;
+ short d1 : 12;
+ char b2 : 4;
+ short d2 : 12;
+} SS_Format_3;
+
+typedef struct {
+ char op;
+ char r1 : 4;
+ char r3 : 4;
+ char b2 : 4;
+ short d2 : 12;
+ char b4 : 4;
+ short d4 : 12;
+} SS_Format_4;
+
+typedef struct {
+ short op;
+ short tb1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSE_Format;
+
+typedef struct {
+ short op;
+ char r3 : 4;
+ char o2 : 4;
+ short b1 : 4;
+ short d1 : 12;
+ short b2 : 4;
+ short d2 : 12;
+} __attribute__ ((packed)) SSF_Format;
+
+#define s390_emit16(c, x) do \
+{ \
+ *((guint16 *) c) = (guint16) x; \
+ c += sizeof(guint16); \
+} while(0)
+
+#define s390_emit32(c, x) do \
+{ \
+ *((guint32 *) c) = (guint32) x; \
+ c += sizeof(guint32); \
+} while(0)
+
+#define S390_E(c,opc) s390_emit16(c,opc)
+
+#define S390_I(c,opc,imm) s390_emit16(c, (opc << 8 | imm))
+
+#define S390_RR(c,opc,g1,g2) s390_emit16(c, (opc << 8 | (g1) << 4 | g2))
+
+#define S390_RRE(c,opc,g1,g2) s390_emit32(c, (opc << 16 | (g1) << 4 | g2))
+
+#define S390_RRF_1(c,opc,g1,g2,g3) s390_emit32(c, (opc << 16 | (g1) << 12 | (g3) << 4 | g2))
+
+#define S390_RRF_2(c,opc,g1,k3,g2) s390_emit32(c, (opc << 16 | (k3) << 12 | (g1) << 4 | g2))
+
+#define S390_RRF_3(c,opc,g1,g2,k4,g3) s390_emit32(c, (opc << 16 | (g3) << 12 | (k4) << 8 | (g1) << 4 | g2))
+
+#define S390_RX(c,opc,g1,n2,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (n2) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RXE(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RXY(c,opc,g1,n2,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | n2)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RS_1(c,opc,g1,g3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_2(c,opc,g1,k3,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (k3) << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RS_3(c,opc,g1,s2,p2) s390_emit32(c, (opc << 24 | (g1) << 20 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_RSY_1(c,opc,g1,g3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSY_2(c,opc,g1,k3,s2,p2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | k3)); \
+ s390_emit32(c, ((s2) << 28 | (((p2) & 0xfff) << 16) | \
+ ((((p2) & 0xff000) >> 12) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSL(c,opc,ln,s1,p1) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (ln) << 4)); \
+ s390_emit32(c, ((s1) << 28 | ((s1 & 0xfff) << 16) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_RSI(c,opc,g1,g3,m2) s390_emit32(c, (opc << 24 | (g1) << 20 | (g3) << 16 | (m2 & 0xffff)))
+
+#define S390_RI(c,opc,g1,m2) s390_emit32(c, ((opc >> 4) << 24 | (g1) << 20 | (opc & 0x0f) << 16 | (m2 & 0xffff)))
+
+#define S390_RIE_1(c,opc,g1,g3,m2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit32(c, ((m2) << 16 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_2(c,opc,g1,g2,m3,v) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | g3)); \
+ s390_emit16(c, (v)); \
+ s390_emit16(c, ((m2) << 12 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_3(c,opc,g1,i,m3,d) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4 | m3)); \
+ s390_emit16(c, (d)); \
+ s390_emit16(c, ((i) << 8 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIE_4(c,opc,g1,i2,m3) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | (g1) << 4); \
+ s390_emit16(c, (i2)); \
+ s390_emit16(c, ((m3) << 12 | (opc & 0xff))); \
+} while (0)
+
+#define S390_RIL_1(c,opc,g1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (g1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIL_2(c,opc,k1,m2) do \
+{ \
+ s390_emit16(c, ((opc >> 4) << 8 | (k1) << 4 | (opc & 0xf))); \
+ s390_emit32(c, m2); \
+} while (0)
+
+#define S390_RIS(c,opc,r,i,m3,b,d) do \
+{ \
+ s390_emit16(c, ((opc, & 0xff00) | (r1) << 4) | (r2)); \
+ s390_emit16(c, ((b) << 12) | (d)); \
+ s390_emit16(c, ((i) << 4) | ((opc) & 0xff)); \
+}
+
+#define S390_RRS(c,opc,r1,r2,m3,b,d) do \
+{ \
+ s390_emit16(c, ((opc, & 0xff00) | (r1) << 4) | (r2)); \
+ s390_emit16(c, ((b) << 12) | (d)); \
+ s390_emit16(c, ((m3) << 12) | ((opc) & 0xff)); \
+}
+
+#define S390_SI(c,opc,s1,p1,m2) s390_emit32(c, (opc << 24 | (m2) << 16 | (s1) << 12 | ((p1) & 0xfff)));
+
+#define S390_SIY(c,opc,s1,p1,m2) do \
+{ \
+ s390_emit16(c, ((opc & 0xff00) | m2)); \
+ s390_emit32(c, ((s1) << 24 | (((p2) & 0xfffff) << 8) | \
+ (opc & 0xff))); \
+} while (0)
+
+#define S390_S(c,opc,s2,p2) s390_emit32(c, (opc << 16 | (s2) << 12 | ((p2) & 0xfff)))
+
+#define S390_SS_1(c,opc,ln,s1,p1,s2,p2) do \
+{ \
+ s390_emit32(c, (opc << 24 | ((ln-1) & 0xff) << 16 | \
+ (s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_2(c,opc,n1,n2,s1,p1,s2,p2) do \
+{ \
+ s390_emit32(c, (opc << 24 | (n1) << 16 | (n2) << 12 | \
+ (s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_3(c,opc,g1,g3,s1,p1,s2,p2) do \
+{ \
+ s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \
+ (s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SS_4(c,opc,g1,g3,s2,p2,s4,p4) do \
+{ \
+ s390_emit32(c, (opc << 24 | (g1) << 16 | (g3) << 12 | \
+ (s2) << 12 | ((p2) & 0xfff))); \
+ s390_emit16(c, ((s4) << 12 | ((p4) & 0xfff))); \
+} while (0)
+
+#define S390_SSE(c,opc,s1,p1,s2,p2) do \
+{ \
+ s390_emit16(c, opc); \
+ s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define S390_SSF(c,opc,r3,s1,p1,s2,p2) do \
+{ \
+ s390_emit16(c, (((opc) & 0xff00) << 8) | ((r3) << 4) | \
+ ((opc) & 0xf)); \
+ s390_emit16(c, ((s1) << 12 | ((p1) & 0xfff))); \
+ s390_emit16(c, ((s2) << 12 | ((p2) & 0xfff))); \
+} while (0)
+
+#define s390_a(c, r, x, b, d) S390_RX(c, 0x5a, r, x, b, d)
+#define s390_adb(c, r, x, b, d) S390_RXE(c, 0xed1a, r, x, b, d)
+#define s390_adbr(c, r1, r2) S390_RRE(c, 0xb31a, r1, r2)
+#define s390_aebr(c, r1, r2) S390_RRE(c, 0xb30a, r1, r2)
+#define s390_afi(c, r, v) S390_RIL_1(c, 0xc29, r, v);
+#define s390_ag(c, r, x, b, d) S390_RXY(c, 0xe308, r, x, b, d)
+#define s390_agf(c, r, x, b, d) S390_RXY(c, 0xe318, r, x, b, d)
+#define s390_agfi(c, r, v) S390_RIL_1(c, 0xc28, r, v)
+#define s390_afgr(c, r1, r2) S390_RRE(c, 0xb918, r1, r2)
+#define s390_ag