summaryrefslogtreecommitdiffstats
path: root/contrib/gcc/config/rs6000
diff options
context:
space:
mode:
authorobrien <obrien@FreeBSD.org>2002-02-01 18:16:02 +0000
committerobrien <obrien@FreeBSD.org>2002-02-01 18:16:02 +0000
commitc9ab9ae440a8066b2c2b85b157b1fdadcf09916a (patch)
tree086d9d6c8fbd4fc8fe4495059332f66bc0f8d12b /contrib/gcc/config/rs6000
parent2ecfd8bd04b63f335c1ec6295740a4bfd97a4fa6 (diff)
downloadFreeBSD-src-c9ab9ae440a8066b2c2b85b157b1fdadcf09916a.zip
FreeBSD-src-c9ab9ae440a8066b2c2b85b157b1fdadcf09916a.tar.gz
Enlist the FreeBSD-CURRENT users as testers of what is to become Gcc 3.1.0.
These bits are taken from the FSF anoncvs repo on 1-Feb-2002 08:20 PST.
Diffstat (limited to 'contrib/gcc/config/rs6000')
-rw-r--r--contrib/gcc/config/rs6000/aix.h194
-rw-r--r--contrib/gcc/config/rs6000/aix31.h77
-rw-r--r--contrib/gcc/config/rs6000/aix3newas.h60
-rw-r--r--contrib/gcc/config/rs6000/aix41.h94
-rw-r--r--contrib/gcc/config/rs6000/aix43.h223
-rw-r--r--contrib/gcc/config/rs6000/aix51.h235
-rw-r--r--contrib/gcc/config/rs6000/altivec-defs.h27
-rw-r--r--contrib/gcc/config/rs6000/altivec.h1459
-rw-r--r--contrib/gcc/config/rs6000/beos.h114
-rw-r--r--contrib/gcc/config/rs6000/darwin-tramp.asm131
-rw-r--r--contrib/gcc/config/rs6000/darwin.h229
-rw-r--r--contrib/gcc/config/rs6000/eabi-ci.asm124
-rw-r--r--contrib/gcc/config/rs6000/eabi-cn.asm115
-rw-r--r--contrib/gcc/config/rs6000/eabi.asm661
-rw-r--r--contrib/gcc/config/rs6000/eabi.h36
-rw-r--r--contrib/gcc/config/rs6000/eabiaix.h39
-rw-r--r--contrib/gcc/config/rs6000/eabialtivec.h31
-rw-r--r--contrib/gcc/config/rs6000/eabisim.h44
-rw-r--r--contrib/gcc/config/rs6000/freebsd.h66
-rw-r--r--contrib/gcc/config/rs6000/linux.h130
-rw-r--r--contrib/gcc/config/rs6000/linux64.h298
-rw-r--r--contrib/gcc/config/rs6000/linuxaltivec.h31
-rw-r--r--contrib/gcc/config/rs6000/lynx.h100
-rw-r--r--contrib/gcc/config/rs6000/mach.h43
-rw-r--r--contrib/gcc/config/rs6000/milli.exp7
-rw-r--r--contrib/gcc/config/rs6000/netbsd.h66
-rw-r--r--contrib/gcc/config/rs6000/ppc-asm.h196
-rw-r--r--contrib/gcc/config/rs6000/rs6000-protos.h186
-rw-r--r--contrib/gcc/config/rs6000/rs6000.c11145
-rw-r--r--contrib/gcc/config/rs6000/rs6000.h2974
-rw-r--r--contrib/gcc/config/rs6000/rs6000.md15598
-rw-r--r--contrib/gcc/config/rs6000/rtems.h34
-rw-r--r--contrib/gcc/config/rs6000/softfloat.h24
-rw-r--r--contrib/gcc/config/rs6000/sol-ci.asm104
-rw-r--r--contrib/gcc/config/rs6000/sol-cn.asm82
-rw-r--r--contrib/gcc/config/rs6000/sysv4.h1458
-rw-r--r--contrib/gcc/config/rs6000/sysv4le.h49
-rw-r--r--contrib/gcc/config/rs6000/t-aix4374
-rw-r--r--contrib/gcc/config/rs6000/t-beos48
-rw-r--r--contrib/gcc/config/rs6000/t-darwin33
-rw-r--r--contrib/gcc/config/rs6000/t-newas49
-rw-r--r--contrib/gcc/config/rs6000/t-ppccomm78
-rw-r--r--contrib/gcc/config/rs6000/t-ppcgas19
-rw-r--r--contrib/gcc/config/rs6000/t-ppcos8
-rw-r--r--contrib/gcc/config/rs6000/t-rs600019
-rw-r--r--contrib/gcc/config/rs6000/tramp.asm109
-rw-r--r--contrib/gcc/config/rs6000/vxppc.h56
-rw-r--r--contrib/gcc/config/rs6000/xcoff.h520
48 files changed, 37497 insertions, 0 deletions
diff --git a/contrib/gcc/config/rs6000/aix.h b/contrib/gcc/config/rs6000/aix.h
new file mode 100644
index 0000000..2738a37
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix.h
@@ -0,0 +1,194 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX.
+ Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Yes! We are AIX! */
+#define DEFAULT_ABI ABI_AIX
+#undef TARGET_AIX
+#define TARGET_AIX 1
+/* The AIX linker will discard static constructors in object files before
+ collect has a chance to see them, so scan the object files directly. */
+#define COLLECT_EXPORT_LIST
+
+/* This is the only version of nm that collect2 can work with. */
+#define REAL_NM_FILE_NAME "/usr/ucb/nm"
+
+#define USER_LABEL_PREFIX ""
+/* Don't turn -B into -L if the argument specifies a relative file name. */
+#define RELATIVE_PREFIX_NOT_LINKDIR
+
+/* Because of the above, we must have gcc search itself to find libgcc.a. */
+#define LINK_LIBGCC_SPECIAL_1
+
+/* Names to predefine in the preprocessor for this target machine. */
+#define CPP_PREDEFINES "-D_IBMR2 -D_POWER -D_AIX -D_AIX32 -D_LONG_LONG \
+-Asystem=unix -Asystem=aix -Acpu=rs6000 -Amachine=rs6000"
+
+/* Define appropriate architecture macros for preprocessor depending on
+ target switches. */
+
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %(cpp_cpu)"
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_PWR"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC ""
+
+/* Tell the assembler to assume that all undefined names are external.
+
+ Don't do this until the fixed IBM assembler is more generally available.
+ When this becomes permanently defined, the ASM_OUTPUT_EXTERNAL,
+ ASM_OUTPUT_EXTERNAL_LIBCALL, and RS6000_OUTPUT_BASENAME macros will no
+ longer be needed. Also, the extern declaration of mcount in ASM_FILE_START
+ will no longer be needed. */
+
+/* #define ASM_SPEC "-u %(asm_cpu)" */
+
+/* Default location of syscalls.exp under AIX */
+#ifndef CROSS_COMPILE
+#define LINK_SYSCALLS_SPEC "-bI:/lib/syscalls.exp"
+#else
+#define LINK_SYSCALLS_SPEC ""
+#endif
+
+/* Default location of libg.exp under AIX */
+#ifndef CROSS_COMPILE
+#define LINK_LIBG_SPEC "-bexport:/usr/lib/libg.exp"
+#else
+#define LINK_LIBG_SPEC ""
+#endif
+
+/* Define the options for the binder: Start text at 512, align all segments
+ to 512 bytes, and warn if there is text relocation.
+
+ The -bhalt:4 option supposedly changes the level at which ld will abort,
+ but it also suppresses warnings about multiply defined symbols and is
+ used by the AIX cc command. So we use it here.
+
+ -bnodelcsect undoes a poor choice of default relating to multiply-defined
+ csects. See AIX documentation for more information about this.
+
+ -bM:SRE tells the linker that the output file is Shared REusable. Note
+ that to actually build a shared library you will also need to specify an
+ export list with the -Wl,-bE option. */
+
+#define LINK_SPEC "-T512 -H512 %{!r:-btextro} -bhalt:4 -bnodelcsect\
+%{static:-bnso %(link_syscalls) } \
+%{!shared:%{g*: %(link_libg) }} %{shared:-bM:SRE}"
+
+/* Profiled library versions are used by linking with special directories. */
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+%{p:-L/lib/profiled -L/usr/lib/profiled} %{!shared:%{g*:-lg}} -lc"
+
+/* AIX word-aligns FP doubles but doubleword-aligns 64-bit ints. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \
+ ? get_inner_array_type (FIELD) \
+ : TREE_TYPE (FIELD)) == DFmode \
+ ? MIN ((COMPUTED), 32) : (COMPUTED))
+
+/* AIX increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TYPE_FIELDS (STRUCT) != 0 \
+ && DECL_MODE (TYPE_FIELDS (STRUCT)) == DFmode \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 64) \
+ : MAX ((COMPUTED), (SPECIFIED)))
+
+
+
+/* Indicate that jump tables go in the text section. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Enable AIX XL compiler calling convention breakage compatibility. */
+#undef TARGET_XL_CALL
+#define MASK_XL_CALL 0x40000000
+#define TARGET_XL_CALL (target_flags & MASK_XL_CALL)
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ {"xl-call", MASK_XL_CALL, \
+ N_("Always pass floating-point arguments in memory") }, \
+ {"no-xl-call", - MASK_XL_CALL, \
+ N_("Don't always pass floating-point arguments in memory") }, \
+ SUBSUBTARGET_SWITCHES
+#define SUBSUBTARGET_SWITCHES
+
+/* Define any extra SPECS that the compiler needs to generate. */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "link_syscalls", LINK_SYSCALLS_SPEC }, \
+ { "link_libg", LINK_LIBG_SPEC }
+
+/* Define cutoff for using external functions to save floating point. */
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63)
+
+/* Optabs entries for the int->float routines and quad FP operations
+ using the standard AIX names. */
+#define ADDTF3_LIBCALL "_xlqadd"
+#define DIVTF3_LIBCALL "_xlqdiv"
+#define MULTF3_LIBCALL "_xlqmul"
+#define SUBTF3_LIBCALL "_xlqsub"
+
+#define INIT_TARGET_OPTABS \
+ do { \
+ if (! TARGET_POWER2 && ! TARGET_POWERPC && TARGET_HARD_FLOAT) \
+ { \
+ fixdfsi_libfunc = init_one_libfunc (RS6000_ITRUNC); \
+ fixunsdfsi_libfunc = init_one_libfunc (RS6000_UITRUNC); \
+ } \
+ if (TARGET_HARD_FLOAT) \
+ { \
+ add_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (ADDTF3_LIBCALL); \
+ sub_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (SUBTF3_LIBCALL); \
+ smul_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (MULTF3_LIBCALL); \
+ sdiv_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (DIVTF3_LIBCALL); \
+ } \
+ } while (0)
+
+/* AIX always has a TOC. */
+#define TARGET_NO_TOC 0
+#define TARGET_TOC 1
+
+#define FIXED_R2 1
+/* AIX allows r13 to be used. */
+#define FIXED_R13 0
+
+/* __throw will restore its own return address to be the same as the
+ return address of the function that the throw is being made to.
+ This is unfortunate, because we want to check the original
+ return address to see if we need to restore the TOC.
+ So we have to squirrel it away with this. */
+#define SETUP_FRAME_ADDRESSES() rs6000_aix_emit_builtin_unwind_init ()
+
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION ;
diff --git a/contrib/gcc/config/rs6000/aix31.h b/contrib/gcc/config/rs6000/aix31.h
new file mode 100644
index 0000000..688e588
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix31.h
@@ -0,0 +1,77 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 running AIX version 3.1.
+ Copyright (C) 1993,1997, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Output something to declare an external symbol to the assembler. Most
+ assemblers don't need this.
+
+ If we haven't already, add "[RW]" (or "[DS]" for a function) to the
+ name. Normally we write this out along with the name. In the few cases
+ where we can't, it gets stripped off. */
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+{ rtx _symref = XEXP (DECL_RTL (DECL), 0); \
+ if ((TREE_CODE (DECL) == VAR_DECL \
+ || TREE_CODE (DECL) == FUNCTION_DECL) \
+ && (NAME)[strlen (NAME) - 1] != ']') \
+ { \
+ char *_name = (char *) permalloc (strlen (XSTR (_symref, 0)) + 5); \
+ strcpy (_name, XSTR (_symref, 0)); \
+ strcat (_name, TREE_CODE (DECL) == FUNCTION_DECL ? "[DS]" : "[RW]"); \
+ XSTR (_symref, 0) = _name; \
+ } \
+ fputs ("\t.extern ", FILE); \
+ assemble_name (FILE, XSTR (_symref, 0)); \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ { \
+ fputs ("\n\t.extern .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, XSTR (_symref, 0)); \
+ } \
+ putc ('\n', FILE); \
+}
+
+/* Similar, but for libcall. We only have to worry about the function name,
+ not that of the descriptor. */
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+{ fputs ("\t.extern .", FILE); \
+ assemble_name (FILE, XSTR (FUN, 0)); \
+ putc ('\n', FILE); \
+}
+
+/* AIX 3.2 defined _AIX32, but older versions do not. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D_IBMR2 -D_AIX -Asystem=unix -Asystem=aix -Acpu=rs6000 -Amachine=rs6000"
+
+/* AIX 3.1 uses bit 15 in CROR as the magic nop. */
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "cror 15,15,15"
+
+/* AIX 3.1 does not prepend underscores to itrunc, uitrunc, or mcount. */
+#undef RS6000_ITRUNC
+#define RS6000_ITRUNC "itrunc"
+#undef RS6000_UITRUNC
+#define RS6000_UITRUNC "uitrunc"
+#undef RS6000_MCOUNT
+#define RS6000_MCOUNT ".mcount"
+
diff --git a/contrib/gcc/config/rs6000/aix3newas.h b/contrib/gcc/config/rs6000/aix3newas.h
new file mode 100644
index 0000000..bc6dc9b
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix3newas.h
@@ -0,0 +1,60 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX version 3.x with the fixed assembler.
+ Copyright (C) 1995, 1996, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Jason Merrill (jason@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Tell the assembler to assume that all undefined names are external. */
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %(asm_cpu)"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mpwr"
+
+/* Define the options for the binder: Start text at 512, align all segments
+ to 512 bytes, and warn if there is text relocation.
+
+ The -bhalt:4 option supposedly changes the level at which ld will abort,
+ but it also suppresses warnings about multiply defined symbols and is
+ used by the AIX cc command. So we use it here.
+
+ -bnodelcsect undoes a poor choice of default relating to multiply-defined
+ csects. See AIX documentation for more information about this.
+
+ -bM:SRE tells the linker that the output file is Shared REusable. Note
+ that to actually build a shared library you will also need to specify an
+ export list with the -Wl,-bE option.
+
+ If -mcpu=common, export the architecture dependent multiply/divide routines
+ as per README.RS6000. */
+
+#undef LINK_SPEC
+#ifndef CROSS_COMPILE
+#define LINK_SPEC "-T512 -H512 %{!r:-btextro} -bhalt:4 -bnodelcsect\
+ %{static:-bnso -bI:/lib/syscalls.exp} \
+ %{mcpu=common: milli.exp%s} \
+ %{!shared:%{g*:-bexport:/usr/lib/libg.exp}} %{shared:-bM:SRE}"
+#else
+#define LINK_SPEC "-T512 -H512 %{!r:-btextro} -bhalt:4 -bnodelcsect\
+ %{static:-bnso} \
+ %{mcpu=common: milli.exp%s} \
+ %{shared:-bM:SRE}"
+#endif
diff --git a/contrib/gcc/config/rs6000/aix41.h b/contrib/gcc/config/rs6000/aix41.h
new file mode 100644
index 0000000..cae1a85
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix41.h
@@ -0,0 +1,94 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX version 4.1.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001
+ Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#undef SUBSUBTARGET_SWITCHES
+#define SUBSUBTARGET_SWITCHES \
+ {"pe", 0, \
+ N_("Support message passing with the Parallel Environment") },
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %(asm_cpu)"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D_IBMR2 -D_POWER -D_AIX -D_AIX32 -D_AIX41 \
+-D_LONG_LONG -Asystem=unix -Asystem=aix"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}\
+ %(cpp_cpu)"
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_COM"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC601
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled} %{!shared:%{g*:-lg}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread: -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a}\
+ %{!pthread: -lc}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{!shared: %{g*: %(link_libg) }}\
+ %{shared:-bM:SRE %{!e:-bnoentry}}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{mpe:%{pg:/usr/lpp/ppe.poe/lib/gcrt0.o}\
+ %{!pg:%{p:/usr/lpp/ppe.poe/lib/mcrt0.o}\
+ %{!p:/usr/lpp/ppe.poe/lib/crt0.o}}}\
+ %{!mpe:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}"
+
+/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
diff --git a/contrib/gcc/config/rs6000/aix43.h b/contrib/gcc/config/rs6000/aix43.h
new file mode 100644
index 0000000..93e186c
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix43.h
@@ -0,0 +1,223 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX version 4.3.
+ Copyright (C) 1998, 1999, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* AIX 4.3 and above support 64-bit executables. */
+#undef SUBSUBTARGET_SWITCHES
+#define SUBSUBTARGET_SWITCHES \
+ {"aix64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
+ N_("Compile for 64-bit pointers") }, \
+ {"aix32", - (MASK_64BIT | MASK_POWERPC64), \
+ N_("Compile for 32-bit pointers") }, \
+ {"pe", 0, \
+ N_("Support message passing with the Parallel Environment") },
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning ("-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning ("-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 -mppc64} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amonst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwr2} \
+ %{mpowerpc*: %{!mpowerpc64: -mppc}} \
+ %{mpowerpc64: -mppc64} \
+ %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwr2} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwr2} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=403: -mppc} \
+%{mcpu=505: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -mppc} \
+%{mcpu=630: -mppc} \
+%{mcpu=821: -mppc} \
+%{mcpu=860: -mppc}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D_IBMR2 -D_POWER -D_AIX -D_AIX32 -D_AIX41 -D_AIX43 \
+-D_LONG_LONG -Asystem=unix -Asystem=aix"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %{maix64: -D__64BIT__ -D_ARCH_PPC -D__LONG_MAX__=9223372036854775807L}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}\
+ %(cpp_cpu)"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_XOPEN_SOURCE=500 \
+ -D_XOPEN_SOURCE_EXTENDED=1 \
+ -D_LARGE_FILE_API \
+ -D_ALL_SOURCE \
+ %{maix64: -D__64BIT__ -D_ARCH_PPC -D__LONG_MAX__=9223372036854775807L}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}\
+ %(cpp_cpu)"
+
+/* Common CPP definitions used by CPP_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#undef CPP_CPU_SPEC
+#define CPP_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -D_ARCH_PWR}} \
+ %{mpower2: -D_ARCH_PWR2} \
+ %{mpowerpc*: -D_ARCH_PPC} \
+ %{!mpower*: %{!mpowerpc*: %(cpp_default)}}}} \
+%{mcpu=common: -D_ARCH_COM} \
+%{mcpu=power: -D_ARCH_PWR} \
+%{mcpu=power2: -D_ARCH_PWR2} \
+%{mcpu=powerpc: -D_ARCH_PPC} \
+%{mcpu=rios: -D_ARCH_PWR} \
+%{mcpu=rios1: -D_ARCH_PWR} \
+%{mcpu=rios2: -D_ARCH_PWR2} \
+%{mcpu=rsc: -D_ARCH_PWR} \
+%{mcpu=rsc1: -D_ARCH_PWR} \
+%{mcpu=rs64a: -D_ARCH_PPC} \
+%{mcpu=403: -D_ARCH_PPC} \
+%{mcpu=505: -D_ARCH_PPC} \
+%{mcpu=601: -D_ARCH_PPC -D_ARCH_PWR} \
+%{mcpu=602: -D_ARCH_PPC} \
+%{mcpu=603: -D_ARCH_PPC} \
+%{mcpu=603e: -D_ARCH_PPC} \
+%{mcpu=604: -D_ARCH_PPC} \
+%{mcpu=620: -D_ARCH_PPC} \
+%{mcpu=630: -D_ARCH_PPC} \
+%{mcpu=821: -D_ARCH_PPC} \
+%{mcpu=860: -D_ARCH_PPC}"
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_COM"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a}\
+ %{!pthread:-lc}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{mpe:%{pg:/usr/lpp/ppe.poe/lib/gcrt0.o}\
+ %{!pg:%{p:/usr/lpp/ppe.poe/lib/mcrt0.o}\
+ %{!p:/usr/lpp/ppe.poe/lib/crt0.o}}}\
+ %{!mpe:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}}"
+
+/* Since there are separate multilibs for pthreads, determine the
+ thread model based on the command-line arguments. */
+#define THREAD_MODEL_SPEC "%{pthread:posix}%{!pthread:single}"
+
+/* AIX 4.3 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* AIX 4 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
diff --git a/contrib/gcc/config/rs6000/aix51.h b/contrib/gcc/config/rs6000/aix51.h
new file mode 100644
index 0000000..ae01440
--- /dev/null
+++ b/contrib/gcc/config/rs6000/aix51.h
@@ -0,0 +1,235 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 POWER running AIX V5.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by David Edelsohn (edelsohn@gnu.org).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* AIX V5 and above support 64-bit executables. */
+#undef SUBSUBTARGET_SWITCHES
+#define SUBSUBTARGET_SWITCHES \
+ {"aix64", MASK_64BIT | MASK_POWERPC64 | MASK_POWERPC, \
+ N_("Compile for 64-bit pointers") }, \
+ {"aix32", - (MASK_64BIT | MASK_POWERPC64), \
+ N_("Compile for 32-bit pointers") }, \
+ {"pe", 0, \
+ N_("Support message passing with the Parallel Environment") },
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define NON_POWERPC_MASKS (MASK_POWER | MASK_POWER2)
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_64BIT && (target_flags & NON_POWERPC_MASKS)) \
+ { \
+ target_flags &= ~NON_POWERPC_MASKS; \
+ warning ("-maix64 and POWER architecture are incompatible"); \
+ } \
+ if (TARGET_64BIT && ! TARGET_POWERPC64) \
+ { \
+ target_flags |= MASK_POWERPC64; \
+ warning ("-maix64 requires PowerPC64 architecture remain enabled"); \
+ } \
+ if (TARGET_POWERPC64 && ! TARGET_64BIT) \
+ { \
+ error ("-maix64 required: 64-bit computation with 32-bit addressing not yet supported"); \
+ } \
+} while (0);
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %{maix64:-a64 -mppc64} %(asm_cpu)"
+
+/* Common ASM definitions used by ASM_SPEC amonst the various targets
+ for handling -mcpu=xxx switches. */
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwr2} \
+ %{mpowerpc*: %{!mpowerpc64: -mppc}} \
+ %{mpowerpc64: -mppc64} \
+ %{!mpower*: %{!mpowerpc*: %(asm_default)}}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwr2} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwr2} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=rs64a: -mppc} \
+%{mcpu=403: -mppc} \
+%{mcpu=505: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -m603} \
+%{mcpu=603e: -m603} \
+%{mcpu=604: -m604} \
+%{mcpu=604e: -m604} \
+%{mcpu=620: -mppc} \
+%{mcpu=630: -mppc} \
+%{mcpu=821: -mppc} \
+%{mcpu=860: -mppc}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mcom"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D_IBMR2 -D_POWER -D_LONG_LONG \
+-D_AIX -D_AIX32 -D_AIX41 -D_AIX43 -D_AIX51 -Asystem=unix -Asystem=aix"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE}\
+ %{ansi: -D_ANSI_C_SOURCE}\
+ %{!maix64: -D__WCHAR_TYPE__=short\\ unsigned\\ int}\
+ %{maix64: -D__64BIT__ -D_ARCH_PPC -D__LONG_MAX__=9223372036854775807L \
+ -D__WCHAR_TYPE__=unsigned\\ int}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}\
+ %(cpp_cpu)"
+
+/* The GNU C++ standard library requires that these macros be
+ defined. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC \
+ "-D_XOPEN_SOURCE=500 \
+ -D_XOPEN_SOURCE_EXTENDED=1 \
+ -D_LARGE_FILE_API \
+ -D_ALL_SOURCE \
+ %{!maix64: -D__WCHAR_TYPE__=short\\ unsigned\\ int}\
+ %{maix64: -D__64BIT__ -D_ARCH_PPC -D__LONG_MAX__=9223372036854775807L \
+ -D__WCHAR_TYPE__=unsigned\\ int}\
+ %{mpe: -I/usr/lpp/ppe.poe/include}\
+ %{pthread: -D_THREAD_SAFE}\
+ %(cpp_cpu)"
+
+/* Common CPP definitions used by CPP_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#undef CPP_CPU_SPEC
+#define CPP_CPU_SPEC \
+"%{!mcpu*: %{!maix64: \
+ %{mpower: %{!mpower2: -D_ARCH_PWR}} \
+ %{mpower2: -D_ARCH_PWR2} \
+ %{mpowerpc*: -D_ARCH_PPC} \
+ %{!mpower*: %{!mpowerpc*: %(cpp_default)}}}} \
+%{mcpu=common: -D_ARCH_COM} \
+%{mcpu=power: -D_ARCH_PWR} \
+%{mcpu=power2: -D_ARCH_PWR2} \
+%{mcpu=powerpc: -D_ARCH_PPC} \
+%{mcpu=rios: -D_ARCH_PWR} \
+%{mcpu=rios1: -D_ARCH_PWR} \
+%{mcpu=rios2: -D_ARCH_PWR2} \
+%{mcpu=rsc: -D_ARCH_PWR} \
+%{mcpu=rsc1: -D_ARCH_PWR} \
+%{mcpu=rs64a: -D_ARCH_PPC} \
+%{mcpu=403: -D_ARCH_PPC} \
+%{mcpu=505: -D_ARCH_PPC} \
+%{mcpu=601: -D_ARCH_PPC -D_ARCH_PWR} \
+%{mcpu=602: -D_ARCH_PPC} \
+%{mcpu=603: -D_ARCH_PPC} \
+%{mcpu=603e: -D_ARCH_PPC} \
+%{mcpu=604: -D_ARCH_PPC} \
+%{mcpu=620: -D_ARCH_PPC} \
+%{mcpu=630: -D_ARCH_PPC} \
+%{mcpu=821: -D_ARCH_PPC} \
+%{mcpu=860: -D_ARCH_PPC}"
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_COM"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT MASK_NEW_MNEMONICS
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC604
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=common" }
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-L/lib/profiled -L/usr/lib/profiled}\
+ %{p:-L/lib/profiled -L/usr/lib/profiled}\
+ %{!maix64:%{!shared:%{g*:-lg}}}\
+ %{mpe:-L/usr/lpp/ppe.poe/lib -lmpi -lvtd}\
+ %{pthread:-lpthreads} -lc"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-bpT:0x10000000 -bpD:0x20000000 %{!r:-btextro} -bnodelcsect\
+ %{static:-bnso %(link_syscalls) } %{shared:-bM:SRE %{!e:-bnoentry}}\
+ %{!maix64:%{!shared:%{g*: %(link_libg) }}} %{maix64:-b64}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:\
+ %{mpe:%{pg:/usr/lpp/ppe.poe/lib/gcrt0.o}\
+ %{!pg:%{p:/usr/lpp/ppe.poe/lib/mcrt0.o}\
+ %{!p:/usr/lpp/ppe.poe/lib/crt0.o}}}\
+ %{!mpe:\
+ %{maix64:%{pg:gcrt0_64%O%s}%{!pg:%{p:mcrt0_64%O%s}%{!p:crt0_64%O%s}}}\
+ %{!maix64:\
+ %{pthread:%{pg:gcrt0_r%O%s}%{!pg:%{p:mcrt0_r%O%s}%{!p:crt0_r%O%s}}}\
+ %{!pthread:%{pg:gcrt0%O%s}%{!pg:%{p:mcrt0%O%s}%{!p:crt0%O%s}}}}}}"
+
+/* Since there are separate multilibs for pthreads, determine the
+ thread model based on the command-line arguments. */
+#define THREAD_MODEL_SPEC "%{pthread:posix}%{!pthread:single}"
+
+/* AIX V5 typedefs ptrdiff_t as "long" while earlier releases used "int". */
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+/* __WCHAR_TYPE__ is dynamic, so do not define it statically. */
+#define NO_BUILTIN_WCHAR_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+
+/* Width of wchar_t in bits. */
+#define WCHAR_TYPE_SIZE (!TARGET_64BIT ? 16 : 32)
+#define MAX_WCHAR_TYPE_SIZE 32
+
+/* AIX V5 uses PowerPC nop (ori 0,0,0) instruction as call glue for PowerPC
+ and "cror 31,31,31" for POWER architecture. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "{cror 31,31,31|nop}"
+
+/* AIX 4.2 and above provides initialization and finalization function
+ support from linker command line. */
+#undef HAS_INIT_SECTION
+#define HAS_INIT_SECTION
+
+#undef LD_INIT_SWITCH
+#define LD_INIT_SWITCH "-binitfini"
diff --git a/contrib/gcc/config/rs6000/altivec-defs.h b/contrib/gcc/config/rs6000/altivec-defs.h
new file mode 100644
index 0000000..123e1c8
--- /dev/null
+++ b/contrib/gcc/config/rs6000/altivec-defs.h
@@ -0,0 +1,27 @@
+/* Target definitions for GNU compiler for PowerPC with AltiVec.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ rs6000_altivec_abi = 1; \
+ target_flags |= MASK_ALTIVEC; \
+} while (0)
diff --git a/contrib/gcc/config/rs6000/altivec.h b/contrib/gcc/config/rs6000/altivec.h
new file mode 100644
index 0000000..4d5b0a3
--- /dev/null
+++ b/contrib/gcc/config/rs6000/altivec.h
@@ -0,0 +1,1459 @@
+/* PowerPC AltiVec include file.
+ Copyright (C) 2002 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As a special exception, if you include this header file into source
+ files compiled by GCC, this header file does not by itself cause
+ the resulting executable to be covered by the GNU General Public
+ License. This exception does not however invalidate any other
+ reasons why the executable file might be covered by the GNU General
+ Public License. */
+
+/* Implemented to conform to the specification included in the AltiVec
+ Technology Programming Interface Manual (ALTIVECPIM/D 6/1999 Rev 0). */
+
+#ifndef _ALTIVEC_H
+#define _ALTIVEC_H 1
+
+/* Required by Motorola specs. */
+#define __VEC__ 10206
+
+#define __ALTIVEC__ 1
+
+#define __vector __attribute__((vector_size(16)))
+
+/* Dummy prototype. */
+extern void __altivec_link_error_invalid_argument ();
+
+/* You are allowed to undef this for C++ compatability. */
+#define vector __vector
+
+/* Helper macros. */
+
+#define __bin_args_eq(xtype, x, ytype, y) \
+ (__builtin_types_compatible_p (xtype, typeof (x)) \
+ && __builtin_types_compatible_p (ytype, typeof (y)))
+
+#define __un_args_eq(xtype, x) \
+ __builtin_types_compatible_p (xtype, typeof (x))
+
+#define __tern_args_eq(xtype, x, ytype, y, ztype, z) \
+ (__builtin_types_compatible_p (xtype, typeof (x)) \
+ && __builtin_types_compatible_p (ytype, typeof (y)) \
+ && __builtin_types_compatible_p (ztype, typeof (z)))
+
+#define __ch(x, y, z) __builtin_choose_expr (x, y, z)
+
+#ifdef __cplusplus
+
+/* C++ stuff here. */
+
+#else /* not C++ */
+
+/* Hairy macros that implement the AltiVec high-level programming
+ interface for C. */
+
+#define vec_add(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vaddfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_addc(a1, a2) __builtin_altivec_vaddcuw (a1, a2)
+
+#define vec_adds(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vaddubs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vaddsbs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vadduhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vaddshs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vadduws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vaddsws ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))
+
+#define vec_and(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
+ (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vand ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))))
+
+#define vec_andc(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
+ (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vandc ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))))
+
+#define vec_avg(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vavgub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vavgsb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vavguh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vavgsh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vavguw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vavgsw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))
+
+#define vec_ceil(a1) __builtin_altivec_vrfip (a1)
+
+#define vec_cmpb(a1, a2) __builtin_altivec_vcmpbfp (a1, a2)
+
+#define vec_cmpeq(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vcmpequb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vcmpequh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpeqfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_cmpge(a1, a2) __builtin_altivec_vcmpgefp (a1, a2)
+
+#define vec_cmpgt(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_cmple(a1, a2) __builtin_altivec_vcmpgefp (a1, a2)
+
+#define vec_cmplt(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vcmpgtub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vcmpgtsb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vcmpgtuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vcmpgtsh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_ctf(a1, a2) \
+__ch (__bin_args_eq (vector unsigned int, a1, const char, a2), \
+ (vector float) __builtin_altivec_vcfux ((vector signed int) a1, (const char) a2), \
+__ch (__bin_args_eq (vector signed int, a1, const char, a2), \
+ (vector float) __builtin_altivec_vcfsx ((vector signed int) a1, (const char) a2), \
+ __altivec_link_error_invalid_argument ()))
+
+#define vec_cts(a1, a2) __builtin_altivec_vctsxs (a1, a2)
+
+#define vec_ctu(a1, a2) __builtin_altivec_vctuxs (a1, a2)
+
+#define vec_dss(a1) __builtin_altivec_dss (a1)
+
+#define vec_dssall() __builtin_altivec_dssall ()
+
+#define vec_dst(a1, a2, a3) __builtin_altivec_dst (a1, a2, a3)
+
+#define vec_dstst(a1, a2, a3) __builtin_altivec_dstst (a1, a2, a3)
+
+#define vec_dststt(a1, a2, a3) __builtin_altivec_dststt (a1, a2, a3)
+
+#define vec_dstt(a1, a2, a3) __builtin_altivec_dstt (a1, a2, a3)
+
+#define vec_expte(a1) __builtin_altivec_vexptefp (a1)
+
+#define vec_floor(a1) __builtin_altivec_vrfim (a1)
+
+#define vec_ld(a, b) \
+__ch (__un_args_eq (vector unsigned char *, b), \
+ (vector unsigned char) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (unsigned char *, b), \
+ (vector unsigned char) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector signed char *, b), \
+ (vector signed char) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (signed char *, b), \
+ (vector signed char) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector unsigned short *, b), \
+ (vector unsigned short) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (unsigned short *, b), \
+ (vector unsigned short) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector signed short *, b), \
+ (vector signed short) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (signed short *, b), \
+ (vector signed short) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector unsigned int *, b), \
+ (vector unsigned int) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (unsigned int *, b), \
+ (vector unsigned int) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector signed int *, b), \
+ (vector signed int) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (signed int *, b), \
+ (vector signed int) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (vector float *, b), \
+ (vector float) __builtin_altivec_lvx (a, b), \
+__ch (__un_args_eq (float *, b), \
+ (vector float) __builtin_altivec_lvx (a, b), \
+__altivec_link_error_invalid_argument ()))))))))))))))
+
+#define vec_lde(a, b) \
+__ch (__un_args_eq (unsigned char *, b), \
+ (vector unsigned char) __builtin_altivec_lvebx (a, b), \
+__ch (__un_args_eq (signed char *, b), \
+ (vector signed char) __builtin_altivec_lvebx (a, b), \
+__ch (__un_args_eq (unsigned short *, b), \
+ (vector unsigned short) __builtin_altivec_lvehx (a, b), \
+__ch (__un_args_eq (signed short *, b), \
+ (vector signed short) __builtin_altivec_lvehx (a, b), \
+__ch (__un_args_eq (unsigned int *, b), \
+ (vector unsigned int) __builtin_altivec_lvewx (a, b), \
+__ch (__un_args_eq (signed int *, b), \
+ (vector signed int) __builtin_altivec_lvewx (a, b), \
+__altivec_link_error_invalid_argument ()))))))
+
+#define vec_ldl(a, b) \
+__ch (__un_args_eq (vector unsigned char *, b), \
+ (vector unsigned char) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (unsigned char *, b), \
+ (vector unsigned char) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector signed char *, b), \
+ (vector signed char) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (signed char *, b), \
+ (vector signed char) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector unsigned short *, b), \
+ (vector unsigned short) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (unsigned short *, b), \
+ (vector unsigned short) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector signed short *, b), \
+ (vector signed short) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (signed short *, b), \
+ (vector signed short) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector unsigned int *, b), \
+ (vector unsigned int) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (unsigned int *, b), \
+ (vector unsigned int) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector signed int *, b), \
+ (vector signed int) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (signed int *, b), \
+ (vector signed int) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (vector float *, b), \
+ (vector float) __builtin_altivec_lvxl (a, b), \
+__ch (__un_args_eq (float *, b), \
+ (vector float) __builtin_altivec_lvxl (a, b), \
+__altivec_link_error_invalid_argument ()))))))))))))))
+
+#define vec_loge(a1) __builtin_altivec_vlogefp (a1)
+
+#define vec_lvsl(a1, a2) __builtin_altivec_lvsl (a1, a2)
+
+#define vec_lvsr(a1, a2) __builtin_altivec_lvsr (a1, a2)
+
+#define vec_madd(a1, a2, a3) __builtin_altivec_vmaddfp (a1, a2, a3)
+
+#define vec_madds(a1, a2, a3) __builtin_altivec_vmhaddshs (a1, a2, a3)
+
+#define vec_max(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vmaxub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vmaxsb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vmaxuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vmaxsh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vmaxuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vmaxsw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vmaxfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_mergeh(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vmrghb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vmrghh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vmrghw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_mergel(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vmrglb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vmrglh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vmrglw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_mfvscr() __builtin_altivec_mfvscr ()
+
+#define vec_min(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vminub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vminsb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vminuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vminsh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vminuw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vminsw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vminfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_mladd: type-dispatched multiply-low-and-add-modulo.  Each
+   __ch/__tern_args_eq clause matches one argument-type combination and
+   expands to __builtin_altivec_vmladduhm with the arguments cast to the
+   builtin's signed-short prototype; an unmatched combination expands to
+   the link-time error __altivec_link_error_invalid_argument.  */
+#define vec_mladd(a1, a2, a3) \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed short, a3), \
+ (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector unsigned short, a2, vector unsigned short, a3), \
+ (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector signed short, a2, vector signed short, a3), \
+ (vector signed short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned short, a3), \
+ (vector unsigned short) __builtin_altivec_vmladduhm ((vector signed short) a1, (vector signed short) a2, (vector signed short) a3), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_mradds(a1, a2, a3) __builtin_altivec_vmhraddshs (a1, a2, a3)
+
+/* vec_msum: multiply-sum.  Dispatches by argument types to one of the
+   vmsumubm/vmsummbm/vmsumuhm/vmsumshm builtins and casts the result to
+   the corresponding element type.  */
+#define vec_msum(a1, a2, a3) \
+__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned int, a3), \
+ (vector unsigned int) __builtin_altivec_vmsumubm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed char, a1, vector unsigned char, a2, vector signed int, a3), \
+ (vector signed int) __builtin_altivec_vmsummbm ((vector signed char) a1, (vector signed char) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned int, a3), \
+ (vector unsigned int) __builtin_altivec_vmsumuhm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed int, a3), \
+ (vector signed int) __builtin_altivec_vmsumshm ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+ __altivec_link_error_invalid_argument ()))))
+
+/* vec_msums: multiply-sum (vmsumuhs/vmsumshs builtins); only the two
+   halfword argument combinations are accepted.  */
+#define vec_msums(a1, a2, a3) \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned int, a3), \
+ (vector unsigned int) __builtin_altivec_vmsumuhs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed int, a3), \
+ (vector signed int) __builtin_altivec_vmsumshs ((vector signed short) a1, (vector signed short) a2, (vector signed int) a3), \
+ __altivec_link_error_invalid_argument ()))
+
+/* vec_mtvscr: move to VSCR.  Accepts any of the six integer vector types;
+   every clause funnels into the same mtvscr builtin after a cast to
+   vector signed int (the bit pattern is what matters).  NOTE(review):
+   vector float is not accepted here — presumably intentional; confirm
+   against the AltiVec PIM.  */
+#define vec_mtvscr(a1) \
+__ch (__un_args_eq (vector signed int, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector unsigned int, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector signed short, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector unsigned short, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector signed char, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+__ch (__un_args_eq (vector unsigned char, a1), \
+ __builtin_altivec_mtvscr ((vector signed int) a1), \
+ __altivec_link_error_invalid_argument ()))))))
+
+/* vec_mule: multiply even elements.  Result elements are twice the width
+   of the inputs (char pairs -> short, short pairs -> int), hence the
+   widened result casts around the vmule* builtins.  */
+#define vec_mule(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vmuleub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed short) __builtin_altivec_vmulesb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned int) __builtin_altivec_vmuleuh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vmulesh ((vector signed short) a1, (vector signed short) a2), \
+ __altivec_link_error_invalid_argument ()))))
+
+/* vec_mulo: multiply odd elements; mirror of vec_mule using the vmulo*
+   builtins, with the same doubled-width result types.  */
+#define vec_mulo(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vmuloub ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed short) __builtin_altivec_vmulosb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned int) __builtin_altivec_vmulouh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vmulosh ((vector signed short) a1, (vector signed short) a2), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_nmsub(a1, a2, a3) __builtin_altivec_vnmsubfp (a1, a2, a3)
+
+/* vec_nor: bitwise NOR.  A single bit operation serves every element
+   type, so all clauses call the one vnor builtin on the operands
+   reinterpreted as vector signed int, then cast the result back to the
+   argument type.  */
+#define vec_nor(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vnor ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+/* vec_or: bitwise OR.  Like vec_nor, every clause calls the single vor
+   builtin on signed-int reinterpretations; unlike vec_nor it also admits
+   mixed signed/unsigned (and float/signed-int) operand pairs, choosing
+   the result cast from the pairing.  */
+#define vec_or(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
+ (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vor ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))))
+
+/* vec_pack: modulo pack to half-width elements.  Signed and unsigned
+   inputs share the same modulo builtin (vpkuhum/vpkuwum); only the
+   result cast differs.  */
+#define vec_pack(a1, a2) \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned char) __builtin_altivec_vpkuhum ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned short) __builtin_altivec_vpkuwum ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))
+
+#define vec_packpx(a1, a2) __builtin_altivec_vpkpx (a1, a2)
+
+/* vec_packs: saturating pack.  Unlike vec_pack, signedness selects a
+   different builtin (vpkuhus/vpkuwus vs. vpkshss/vpkswss) because the
+   saturation bounds depend on it.  */
+#define vec_packs(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed char) __builtin_altivec_vpkshss ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed short) __builtin_altivec_vpkswss ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))
+
+/* vec_packsu: saturating pack to an unsigned result regardless of input
+   signedness (signed inputs use vpkshus/vpkswus).  */
+#define vec_packsu(a1, a2) \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned char) __builtin_altivec_vpkuhus ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector unsigned char) __builtin_altivec_vpkshus ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned short) __builtin_altivec_vpkuwus ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector unsigned short) __builtin_altivec_vpkswus ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))
+
+/* vec_perm: generic permute.  Dispatches on the (matching) types of the
+   two data operands, with a3 always the vector unsigned char permute
+   control, and expands to vperm_4si with the result cast back.
+   FIX: the macro previously declared a spurious fourth parameter (a4)
+   that the body never used; vec_perm takes exactly three operands per
+   the AltiVec programming interface, so every conforming three-argument
+   call was rejected by the preprocessor.  */
+#define vec_perm(a1, a2, a3) \
+__ch (__tern_args_eq (vector float, a1, vector float, a2, vector unsigned char, a3), \
+ (vector float) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector unsigned char, a3), \
+ (vector signed int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector unsigned char, a3), \
+ (vector unsigned int) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector unsigned char, a3), \
+ (vector signed short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned char, a3), \
+ (vector unsigned short) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector unsigned char, a3), \
+ (vector signed char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned char, a3), \
+ (vector unsigned char) __builtin_altivec_vperm_4si ((vector signed int) a1, (vector signed int) a2, (vector signed char) a3), \
+ __altivec_link_error_invalid_argument ())))))))
+
+#define vec_re(a1) __builtin_altivec_vrefp (a1)
+
+/* vec_rl: rotate left by element.  Selects vrlb/vrlh/vrlw from the data
+   element width; the shift-count operand a2 must be the unsigned vector
+   of the same width.  */
+#define vec_rl(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vrlb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vrlh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vrlw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))
+
+/* vec_round / vec_rsqrte: direct wrappers for the float-only vrfin and
+   vrsqrtefp builtins; no type dispatch required.  */
+#define vec_round(a1) __builtin_altivec_vrfin (a1)
+
+#define vec_rsqrte(a1) __builtin_altivec_vrsqrtefp (a1)
+
+/* vec_sel: bitwise select.  a1 and a2 must share a type; the mask a3 may
+   be the signed or unsigned integer vector of the same width.  All
+   clauses funnel into the single vsel_4si builtin on signed-int
+   reinterpretations, then cast back to the data type.  */
+#define vec_sel(a1, a2, a3) \
+__ch (__tern_args_eq (vector float, a1, vector float, a2, vector signed int, a3), \
+ (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector float, a1, vector float, a2, vector unsigned int, a3), \
+ (vector float) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector signed int, a3), \
+ (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, vector unsigned int, a3), \
+ (vector signed int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector signed int, a3), \
+ (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, vector unsigned int, a3), \
+ (vector unsigned int) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector signed short, a3), \
+ (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, vector unsigned short, a3), \
+ (vector signed short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector signed short, a3), \
+ (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, vector unsigned short, a3), \
+ (vector unsigned short) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector signed char, a3), \
+ (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, vector unsigned char, a3), \
+ (vector signed char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector signed char, a3), \
+ (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, vector unsigned char, a3), \
+ (vector unsigned char) __builtin_altivec_vsel_4si ((vector signed int) a1, (vector signed int) a2, (vector signed int) a3), \
+ __altivec_link_error_invalid_argument ()))))))))))))))
+
+/* vec_sl: shift left logical by element (vslb/vslh/vslw chosen by
+   element width; a2 is the unsigned shift-count vector of the same
+   width).  */
+#define vec_sl(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vslb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vslh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vslw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))
+
+/* vec_sld: shift left double by octet immediate.  a3 is a compile-time
+   constant (matched here as const char) passed straight through to the
+   vsldoi_4si builtin; the two data operands must share a type.  */
+#define vec_sld(a1, a2, a3) \
+__ch (__tern_args_eq (vector float, a1, vector float, a2, const char, a3), \
+ (vector float) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector signed int, a1, vector signed int, a2, const char, a3), \
+ (vector signed int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector unsigned int, a1, vector unsigned int, a2, const char, a3), \
+ (vector unsigned int) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector signed short, a1, vector signed short, a2, const char, a3), \
+ (vector signed short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector unsigned short, a1, vector unsigned short, a2, const char, a3), \
+ (vector unsigned short) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector signed char, a1, vector signed char, a2, const char, a3), \
+ (vector signed char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+__ch (__tern_args_eq (vector unsigned char, a1, vector unsigned char, a2, const char, a3), \
+ (vector unsigned char) __builtin_altivec_vsldoi_4si ((vector signed int) a1, (vector signed int) a2, (const char) a3), \
+ __altivec_link_error_invalid_argument ())))))))
+
+/* vec_sll: shift left long (whole-vector bit shift, vsl builtin).  Any
+   of the six integer vector types may be shifted; the shift operand a2
+   may be any unsigned integer vector type, giving 6 x 3 = 18 clauses
+   that all call the same builtin on signed-int reinterpretations.  */
+#define vec_sll(a1, a2) \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned short, a2), \
+ (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
+ (vector unsigned int) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned int, a2), \
+ (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
+ (vector signed short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned int, a2), \
+ (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned int, a2), \
+ (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned short, a2), \
+ (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
+ (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned short, a2), \
+ (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsl ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))))))))
+
+/* vec_slo: shift left by octet (vslo builtin).  The shifted operand may
+   be float or any integer vector; the shift operand a2 must be a char
+   vector (signed or unsigned), giving the 14 clauses below.  */
+#define vec_slo(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector signed char, a2), \
+ (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector unsigned char, a2), \
+ (vector float) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed char, a2), \
+ (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
+ (vector unsigned int) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed char, a2), \
+ (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
+ (vector signed short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed char, a2), \
+ (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vslo ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))))
+
+/* vec_splat: replicate element a2 (a compile-time constant index) across
+   the vector; vspltb/vsplth/vspltw selected by element width.  */
+#define vec_splat(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, const char, a2), \
+ (vector signed char) __builtin_altivec_vspltb ((vector signed char) a1, (const char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, const char, a2), \
+ (vector unsigned char) __builtin_altivec_vspltb ((vector signed char) a1, (const char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, const char, a2), \
+ (vector signed short) __builtin_altivec_vsplth ((vector signed short) a1, (const char) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, const char, a2), \
+ (vector unsigned short) __builtin_altivec_vsplth ((vector signed short) a1, (const char) a2), \
+__ch (__bin_args_eq (vector float, a1, const char, a2), \
+ (vector float) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
+__ch (__bin_args_eq (vector signed int, a1, const char, a2), \
+ (vector signed int) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, const char, a2), \
+ (vector unsigned int) __builtin_altivec_vspltw ((vector signed int) a1, (const char) a2), \
+ __altivec_link_error_invalid_argument ())))))))
+
+/* vec_splat_s*/u*: splat a 5-bit signed immediate.  The unsigned
+   variants reuse the same vspltis* builtins as the signed ones — the
+   generated bit pattern is identical, only the nominal result type
+   differs (and no cast is applied here).  */
+#define vec_splat_s8(a1) __builtin_altivec_vspltisb (a1)
+
+#define vec_splat_s16(a1) __builtin_altivec_vspltish (a1)
+
+#define vec_splat_s32(a1) __builtin_altivec_vspltisw (a1)
+
+#define vec_splat_u8(a1) __builtin_altivec_vspltisb (a1)
+
+#define vec_splat_u16(a1) __builtin_altivec_vspltish (a1)
+
+#define vec_splat_u32(a1) __builtin_altivec_vspltisw (a1)
+
+/* vec_sr: shift right logical by element (vsrb/vsrh/vsrw chosen by
+   element width; a2 is the unsigned shift-count vector of the same
+   width).  */
+#define vec_sr(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsrb ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsrh ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsrw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))
+
+/* vec_sra: shift right algebraic (arithmetic) by element, via
+   vsrab/vsrah/vsraw; same clause structure as vec_sr.  */
+#define vec_sra(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsrab ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsrah ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsraw ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))
+
+/* vec_srl: shift right long (whole-vector bit shift, vsr builtin);
+   mirror of vec_sll with the same 18-clause type matrix.  */
+#define vec_srl(a1, a2) \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned short, a2), \
+ (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
+ (vector unsigned int) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned int, a2), \
+ (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
+ (vector signed short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned int, a2), \
+ (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned int, a2), \
+ (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned short, a2), \
+ (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
+ (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned short, a2), \
+ (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsr ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))))))))
+
+/* vec_sro: Vector Shift Right by Octet.  Dispatches on a1's element type
+   (float / int / short / char, signed or unsigned) paired with a signed or
+   unsigned char shift-count vector a2.  Every arm puns both operands to
+   vector signed int, calls the single vsro builtin, and casts the result
+   back to a1's type.  An argument pair matching no arm falls through to
+   __altivec_link_error_invalid_argument, turning misuse into a link error.  */
+#define vec_sro(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector signed char, a2), \
+ (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector unsigned char, a2), \
+ (vector float) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed char, a2), \
+ (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned char, a2), \
+ (vector unsigned int) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed char, a2), \
+ (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned char, a2), \
+ (vector signed short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed char, a2), \
+ (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned char, a2), \
+ (vector unsigned short) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsro ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))))
+
+/* vec_st / vec_stl: whole-vector store (stvx) and LRU-hinted store (stvxl)
+   of a1 to offset a2 from pointer a3.  No type dispatch: any vector type is
+   punned to vector signed int before the builtin call.  */
+#define vec_st(a1, a2, a3) \
+ __builtin_altivec_stvx ((vector signed int) a1, a2, a3)
+
+#define vec_stl(a1, a2, a3) \
+ __builtin_altivec_stvxl ((vector signed int) a1, a2, a3)
+
+/* vec_ste: store a single element of vector a to offset b from pointer c,
+   selecting the byte (stvebx), halfword (stvehx) or word (stvewx) builtin
+   from a's element width; float uses the word form.  */
+#define vec_ste(a, b, c) \
+__ch (__un_args_eq (vector unsigned char, a), \
+ __builtin_altivec_stvebx ((vector signed char) a, b, c), \
+__ch (__un_args_eq (vector signed char, a), \
+ __builtin_altivec_stvebx ((vector signed char) a, b, c), \
+__ch (__un_args_eq (vector unsigned short, a), \
+ __builtin_altivec_stvehx ((vector signed short) a, b, c), \
+__ch (__un_args_eq (vector signed short, a), \
+ __builtin_altivec_stvehx ((vector signed short) a, b, c), \
+__ch (__un_args_eq (vector unsigned int, a), \
+ __builtin_altivec_stvewx ((vector signed int) a, b, c), \
+__ch (__un_args_eq (vector signed int, a), \
+ __builtin_altivec_stvewx ((vector signed int) a, b, c), \
+__ch (__un_args_eq (vector float, a), \
+ __builtin_altivec_stvewx ((vector signed int) a, b, c), \
+ __altivec_link_error_invalid_argument ())))))))
+
+/* vec_sub: element-wise modulo (wrap-around) subtraction.  The element
+   width picks the builtin (vsububm / vsubuhm / vsubuwm; vsubfp for float);
+   the result cast is unsigned whenever either operand is unsigned.  */
+#define vec_sub(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububm ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhm ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuwm ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vsubfp ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_subc: carry-out of unsigned word subtraction (vsubcuw); arguments are
+   passed straight through with no type dispatch or casts.  */
+#define vec_subc(a1, a2) __builtin_altivec_vsubcuw (a1, a2)
+
+/* vec_subs: element-wise saturating subtraction.  When either operand is
+   unsigned the unsigned-saturate builtin is used (vsububs/vsubuhs/vsubuws);
+   only the all-signed pairs select the signed-saturate forms
+   (vsubsbs/vsubshs/vsubsws).  */
+#define vec_subs(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vsububs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vsubsbs ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vsubuhs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vsubshs ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsubuws ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vsubsws ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ()))))))))))))
+
+/* vec_sum4s: saturating partial sums of groups of four elements of a1,
+   accumulated into the words of a2.  Three variants: unsigned char
+   (vsum4ubs), signed char (vsum4sbs), and signed short (vsum4shs, groups
+   of two in that case per the underlying instruction).  */
+#define vec_sum4s(a1, a2) \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vsum4ubs ((vector signed char) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vsum4sbs ((vector signed char) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vsum4shs ((vector signed short) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))
+
+/* vec_sum2s / vec_sums: saturating cross-element sums (vsum2sws, vsumsws);
+   vec_trunc: round-toward-zero of a float vector (vrfiz).  All three pass
+   their arguments through untouched -- no type dispatch.  */
+#define vec_sum2s(a1, a2) __builtin_altivec_vsum2sws (a1, a2)
+
+#define vec_sums(a1, a2) __builtin_altivec_vsumsws (a1, a2)
+
+#define vec_trunc(a1) __builtin_altivec_vrfiz (a1)
+
+/* vec_unpackh: unpack/sign-extend the high half of a1 to the next wider
+   element type.  NOTE(review): the second and third arms both test
+   vector signed short, so the vupkhsh (short -> int) arm is unreachable;
+   the pixel unpack (vupkhpx) always wins for shorts -- presumably pending
+   a distinct vector pixel type.  Verify against the dispatch order.  */
+#define vec_unpackh(a1) \
+__ch (__un_args_eq (vector signed char, a1), \
+ (vector signed short) __builtin_altivec_vupkhsb ((vector signed char) a1), \
+__ch (__un_args_eq (vector signed short, a1), \
+ (vector unsigned int) __builtin_altivec_vupkhpx ((vector signed short) a1), \
+__ch (__un_args_eq (vector signed short, a1), \
+ (vector signed int) __builtin_altivec_vupkhsh ((vector signed short) a1), \
+ __altivec_link_error_invalid_argument ())))
+
+/* vec_unpackl: unpack/sign-extend the low half of a1, mirroring
+   vec_unpackh.  NOTE(review): as with vec_unpackh, two consecutive arms
+   test vector signed short, leaving the vupklsh arm unreachable behind
+   the pixel unpack (vupklpx) -- confirm intended dispatch.  */
+#define vec_unpackl(a1) \
+__ch (__un_args_eq (vector signed char, a1), \
+ (vector signed short) __builtin_altivec_vupklsb ((vector signed char) a1), \
+__ch (__un_args_eq (vector signed short, a1), \
+ (vector unsigned int) __builtin_altivec_vupklpx ((vector signed short) a1), \
+__ch (__un_args_eq (vector signed short, a1), \
+ (vector signed int) __builtin_altivec_vupklsh ((vector signed short) a1), \
+ __altivec_link_error_invalid_argument ())))
+
+/* vec_xor: bitwise exclusive-or.  Every arm calls the same vxor builtin on
+   operands punned to vector signed int; the type dispatch exists only to
+   choose the cast applied to the result (float when either side is float,
+   otherwise matching width/signedness of the mixed operands).  */
+#define vec_xor(a1, a2) \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector signed int, a2), \
+ (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector float, a2), \
+ (vector float) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector unsigned int) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector unsigned short) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector unsigned char) __builtin_altivec_vxor ((vector signed int) a1, (vector signed int) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))))
+
+/* vec_all_eq: all-elements-equal predicate.  Dispatches on element width to
+   the predicate builtins vcmpequb_p / vcmpequh_p / vcmpequw_p / vcmpeqfp_p.
+   NOTE(review): no CR6 selector argument is passed in this snapshot, so the
+   "all" semantics must live inside the builtin -- confirm against the
+   builtin definitions.  */
+#define vec_all_eq(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_all_ge: all-elements >= predicate.  Unsigned greater-than builtins
+   when either operand is unsigned, signed otherwise; float uses vcmpgefp_p.
+   NOTE(review): the integer arms are byte-identical to vec_all_gt (same
+   vcmpgt*_p builtins, same operand order) with no CR6 selector or operand
+   swap to express >= vs > -- later GCC versions pass an explicit CR6 code
+   and swap operands here; verify against the builtin definitions.  */
+#define vec_all_ge(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_all_gt: all-elements > predicate.  Unsigned greater-than builtins
+   when either operand is unsigned, signed builtins for all-signed pairs,
+   vcmpgtfp_p for float.  NOTE(review): no CR6 selector is passed in this
+   snapshot; the "all" semantics must live inside the builtin.  */
+#define vec_all_gt(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_all_in: float bounds predicate via the vcmpbfp_p builtin; arguments
+   pass through with no type dispatch.  */
+#define vec_all_in(a1, a2) __builtin_altivec_vcmpbfp_p (a1, a2)
+
+/* vec_all_le: all-elements <= predicate.  NOTE(review): this expansion is
+   byte-identical to vec_all_ge above (same vcmpgt*_p / vcmpgefp_p builtins,
+   same (a1, a2) operand order); with no CR6 selector or operand swap the
+   two macros cannot both be correct unless the distinction is encoded in
+   the builtins -- TODO confirm against the builtin definitions.  */
+#define vec_all_le(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_all_lt: all-elements < predicate.  NOTE(review): this expansion is
+   byte-identical to vec_all_gt above (same vcmpgt*_p / vcmpgtfp_p builtins,
+   same (a1, a2) operand order, no operand swap for the reversed relation)
+   -- TODO confirm against the builtin definitions; later GCC versions swap
+   the operands here.  */
+#define vec_all_lt(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_all_nan: all-elements-NaN predicate.  NOTE(review): vcmpeqfp_p is
+   called here with a single argument, but with two arguments in vec_all_eq
+   -- confirm the builtin's arity; later versions pass (__CR6_EQ, a1, a1).  */
+#define vec_all_nan(a1) __builtin_altivec_vcmpeqfp_p (a1)
+
+/* vec_all_ne: all-elements-not-equal predicate.  NOTE(review): this
+   expansion is byte-identical to vec_all_eq above; the eq/ne inversion is
+   not expressed in the macro (no CR6 selector in this snapshot) -- TODO
+   confirm against the builtin definitions.  */
+#define vec_all_ne(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* Float-only negated predicates.  NOTE(review): vec_all_nge and
+   vec_all_nle expand identically (vcmpgefp_p (a1, a2)), as do vec_all_ngt
+   and vec_all_nlt (vcmpgtfp_p (a1, a2)); with no CR6 selector or operand
+   swap these pairs cannot be distinguished here -- TODO confirm against
+   the builtin definitions.  vec_all_numeric (no NaN elements) calls
+   vcmpeqfp_p with a single argument, unlike the two-argument uses above.  */
+#define vec_all_nge(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+
+#define vec_all_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+
+#define vec_all_nle(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+
+#define vec_all_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+
+#define vec_all_numeric(a1) __builtin_altivec_vcmpeqfp_p (a1)
+
+/* vec_any_eq: any-element-equal predicate.  NOTE(review): this expansion is
+   byte-identical to vec_all_eq; the any/all distinction is not expressed in
+   the macro (no CR6 selector in this snapshot) -- TODO confirm against the
+   builtin definitions.  */
+#define vec_any_eq(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+/* vec_any_ge: any-element >= predicate.  NOTE(review): this expansion is
+   byte-identical to vec_all_ge (and to the integer arms of vec_all_gt);
+   neither the any/all nor the >=/> distinction is expressed in the macro
+   itself -- TODO confirm against the builtin definitions.  */
+#define vec_any_ge(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_any_gt(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_any_le(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgefp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_any_lt(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtub_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtuw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtsw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpgtfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_any_nan(a1) __builtin_altivec_vcmpeqfp_p (a1)
+
+#define vec_any_ne(a1, a2) \
+__ch (__bin_args_eq (vector signed char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector signed char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector unsigned char, a1, vector unsigned char, a2), \
+ (vector signed int) __builtin_altivec_vcmpequb_p ((vector signed char) a1, (vector signed char) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector signed short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector unsigned short, a1, vector unsigned short, a2), \
+ (vector signed int) __builtin_altivec_vcmpequh_p ((vector signed short) a1, (vector signed short) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector signed int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector signed int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector unsigned int, a1, vector unsigned int, a2), \
+ (vector signed int) __builtin_altivec_vcmpequw_p ((vector signed int) a1, (vector signed int) a2), \
+__ch (__bin_args_eq (vector float, a1, vector float, a2), \
+ (vector signed int) __builtin_altivec_vcmpeqfp_p ((vector float) a1, (vector float) a2), \
+ __altivec_link_error_invalid_argument ())))))))))))))
+
+#define vec_any_nge(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+
+#define vec_any_ngt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+
+#define vec_any_nle(a1, a2) __builtin_altivec_vcmpgefp_p (a1, a2)
+
+#define vec_any_nlt(a1, a2) __builtin_altivec_vcmpgtfp_p (a1, a2)
+
+#define vec_any_numeric(a1) __builtin_altivec_vcmpeqfp_p (a1)
+
+#define vec_any_out(a1, a2) __builtin_altivec_vcmpbfp_p (a1, a2)
+
+#endif /* __cplusplus */
+
+#endif /* _ALTIVEC_H */
diff --git a/contrib/gcc/config/rs6000/beos.h b/contrib/gcc/config/rs6000/beos.h
new file mode 100644
index 0000000..f569c3c
--- /dev/null
+++ b/contrib/gcc/config/rs6000/beos.h
@@ -0,0 +1,114 @@
+/* Definitions of target machine for GNU compiler, for BeOS.
+ Copyright (C) 1997, 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Fred Fish (fnf@cygnus.com), based on aix41.h
+ from David Edelsohn (edelsohn@npac.syr.edu).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (BeOS/PowerPC)");
+
+/* Enable AIX XL compiler calling convention breakage compatibility. */
+#define MASK_XL_CALL 0x40000000
+#define TARGET_XL_CALL (target_flags & MASK_XL_CALL)
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ {"xl-call", MASK_XL_CALL, \
+ N_("Always pass floating-point arguments in memory") }, \
+ {"no-xl-call", - MASK_XL_CALL, \
+ N_("Don't always pass floating-point arguments in memory") }, \
+ {"threads", 0}, \
+ {"pe", 0},
+
+#undef ASM_SPEC
+#define ASM_SPEC "-u %(asm_cpu)"
+
+#undef CPP_PREDEFINES
+/* __POWERPC__ must be defined for some header files */
+#define CPP_PREDEFINES "-D__BEOS__ -D__POWERPC__ -Asystem=beos -Acpu=powerpc -Amachine=powerpc"
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} %(cpp_cpu)"
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_PPC"
+
+/* This is the easiest way to disable use of gcc's builtin alloca,
+ which in the current BeOS release (DR9) is a problem because of the
+ relatively low default stack size of 256K with no way to expand it.
+ So anything we compile for the BeOS target should not use the
+ builtin alloca. This also has the unwanted side effect of
+ disabling all builtin functions though. */
+
+#undef CC1_SPEC
+#define CC1_SPEC "%{!fbuiltin: -fno-builtin}"
+#undef CC1PLUS_SPEC
+#define CC1PLUS_SPEC "%{!fbuiltin: -fno-builtin}"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC603
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mcpu=powerpc" }
+
+/* These empty definitions get rid of the attempt to link in crt0.o
+ and any libraries like libc.a.
+ On BeOS the ld executable is actually a linker front end that first runs
+ the GNU linker with the -r option to generate a relocatable XCOFF output
+ file, and then runs the Metrowerks linker (mwld) to generate a fully linked
+ executable. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef LINK_SPEC
+#define LINK_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+/* Text to write out after a CALL that may be replaced by glue code by
+ the loader. */
+
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "cror 15,15,15"
+
+/* Struct alignments are done on 4 byte boundaries for all types. */
+#undef BIGGEST_FIELD_ALIGNMENT
+#define BIGGEST_FIELD_ALIGNMENT 32
+
+/* STANDARD_INCLUDE_DIR is the equivalent of "/usr/include" on UNIX. */
+#define STANDARD_INCLUDE_DIR "/boot/develop/headers/posix"
+
+/* SYSTEM_INCLUDE_DIR is the location for system specific, non-POSIX headers. */
+#define SYSTEM_INCLUDE_DIR "/boot/develop/headers/be"
diff --git a/contrib/gcc/config/rs6000/darwin-tramp.asm b/contrib/gcc/config/rs6000/darwin-tramp.asm
new file mode 100644
index 0000000..02c7be6
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin-tramp.asm
@@ -0,0 +1,131 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause the
+ * resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why the
+ * executable file might be covered by the GNU General Public License.
+ */
+
+/* Set up trampolines. */
+
+.text
+ .align 2
+Ltrampoline_initial:
+ mflr r0
+ bl 1f
+Lfunc = .-Ltrampoline_initial
+ .long 0 /* will be replaced with function address */
+Lchain = .-Ltrampoline_initial
+ .long 0 /* will be replaced with static chain */
+1: mflr r11
+ lwz r12,0(r11) /* function address */
+ mtlr r0
+ mtctr r12
+ lwz r11,4(r11) /* static chain */
+ bctr
+
+trampoline_size = .-Ltrampoline_initial
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+ .globl ___trampoline_setup
+___trampoline_setup:
+ mflr r0 /* save return address */
+ bcl 20,31,LCF0 /* load up __trampoline_initial into r7 */
+LCF0:
+ mflr r11
+ addi r7,r11,ha16(LTRAMP-LCF0)
+ lwz r7,lo16(LTRAMP-LCF0)(r7)
+ subi r7,r7,4
+ li r8,trampoline_size /* verify trampoline big enough */
+ cmpw cr1,r8,r4
+ srwi r4,r4,2 /* # words to move */
+ addi r9,r3,-4 /* adjust pointer for lwzu */
+ mtctr r4
+ blt cr1,Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz Lmove
+
+ /* Store correct function and static chain */
+ stw r5,Lfunc(r3)
+ stw r6,Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz Lcache
+
+ /* Finally synchronize things & return */
+ sync
+ isync
+ blr
+
+Labort:
+#ifdef __DYNAMIC__
+ bl L_abort$stub
+.data
+.picsymbol_stub
+L_abort$stub:
+ .indirect_symbol _abort
+ mflr r0
+ bcl 20,31,L0$_abort
+L0$_abort:
+ mflr r11
+ addis r11,r11,ha16(L_abort$lazy_ptr-L0$_abort)
+ mtlr r0
+ lwz r12,lo16(L_abort$lazy_ptr-L0$_abort)(r11)
+ mtctr r12
+ addi r11,r11,lo16(L_abort$lazy_ptr-L0$_abort)
+ bctr
+.data
+.lazy_symbol_pointer
+L_abort$lazy_ptr:
+ .indirect_symbol _abort
+ .long dyld_stub_binding_helper
+#else
+ bl _abort
+#endif
+.data
+ .align 2
+LTRAMP:
+ .long Ltrampoline_initial
+
diff --git a/contrib/gcc/config/rs6000/darwin.h b/contrib/gcc/config/rs6000/darwin.h
new file mode 100644
index 0000000..cb6b4b77
--- /dev/null
+++ b/contrib/gcc/config/rs6000/darwin.h
@@ -0,0 +1,229 @@
+/* Target definitions for PowerPC running Darwin (Mac OS X).
+ Copyright (C) 1997, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Apple Computer Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Darwin/PowerPC)");
+
+/* The "Darwin ABI" is mostly like AIX, but with some key differences. */
+
+#define DEFAULT_ABI ABI_DARWIN
+
+/* The object file format is Mach-O. */
+
+#define TARGET_OBJECT_FORMAT OBJECT_MACHO
+
+/* We're not ever going to do TOCs. */
+
+#define TARGET_TOC 0
+#define TARGET_NO_TOC 1
+
+/* The Darwin ABI always includes AltiVec, can't be (validly) turned
+ off. */
+
+#define SUBTARGET_OVERRIDE_OPTIONS \
+ rs6000_altivec_abi = 1;
+
+#define CPP_PREDEFINES "-D__ppc__ -D__POWERPC__ -D__NATURAL_ALIGNMENT__ -D__MACH__ -D__BIG_ENDIAN__ -D__APPLE__"
+
+/* We want -fPIC by default, unless we're using -static to compile for
+ the kernel or some such. */
+
+#define CC1_SPEC "%{!static:-fPIC}"
+
+/* Make both r2 and r3 available for allocation. */
+#define FIXED_R2 0
+#define FIXED_R13 0
+
+/* Base register for access to local variables of the function. */
+
+#undef FRAME_POINTER_REGNUM
+#define FRAME_POINTER_REGNUM 30
+
+#undef PIC_OFFSET_TABLE_REGNUM
+#define PIC_OFFSET_TABLE_REGNUM 31
+
+/* Pad the outgoing args area to 16 bytes instead of the usual 8. */
+
+#undef STARTING_FRAME_OFFSET
+#define STARTING_FRAME_OFFSET \
+ (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
+ + RS6000_VARARGS_AREA \
+ + RS6000_SAVE_AREA)
+
+#undef STACK_DYNAMIC_OFFSET
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (current_function_outgoing_args_size, 16) \
+ + (STACK_POINTER_OFFSET))
+
+/* Define cutoff for using external functions to save floating point.
+ Currently on Darwin, always use inline stores. */
+
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+
+/* Always use the "debug" register names, they're what the assembler
+ wants to see. */
+
+#undef REGISTER_NAMES
+#define REGISTER_NAMES DEBUG_REGISTER_NAMES
+
+/* This outputs NAME to FILE. */
+
+#undef RS6000_OUTPUT_BASENAME
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name (FILE, NAME);
+
+/* Output before instructions. */
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#undef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.globl ", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); putc ('\n', FILE);} while (0)
+
+/* This is how to output an internal label prefix. rs6000.c uses this
+ when generating traceback tables. */
+/* Not really used for Darwin? */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL_PREFIX
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ fprintf (FILE, "%s", PREFIX)
+
+#undef TEXT_SECTION_ASM_OP
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#undef DATA_SECTION_ASM_OP
+#define DATA_SECTION_ASM_OP ".data"
+
+/* This says how to output an assembler line to define a global common
+ symbol. */
+/* ? */
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+ do { fputs (".comm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fprintf ((FILE), ",%d\n", (SIZE)); } while (0)
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %d\n", SIZE)
+
+/* Override the standard rs6000 definition. */
+
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START ";"
+
+/* FP save and restore routines. */
+#define SAVE_FP_PREFIX "._savef"
+#define SAVE_FP_SUFFIX ""
+#define RESTORE_FP_PREFIX "._restf"
+#define RESTORE_FP_SUFFIX ""
+
+/* Generate insns to call the profiler. */
+
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+
+/* Function name to call to do profiling. */
+
+#define RS6000_MCOUNT "*mcount"
+
+/* Default processor: a G4. */
+
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC7400
+
+/* Default target flag settings. Despite the fact that STMW/LMW
+ serializes, it's still a big codesize win to use them. Use FSEL by
+ default as well. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_MULTIPLE | MASK_NEW_MNEMONICS \
+ | MASK_PPC_GFXOPT)
+
+/* Since Darwin doesn't do TOCs, stub this out. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) 0
+
+/* Unlike most other PowerPC targets, chars are signed, for
+ consistency with other Darwin architectures. */
+
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR (1)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ Don't allow R0 when loading the address of, or otherwise furtling with,
+ a SYMBOL_REF. */
+
+#undef PREFERRED_RELOAD_CLASS
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (((GET_CODE (X) == CONST_DOUBLE \
+ && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) \
+ ? NO_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == HIGH) \
+ ? BASE_REGS \
+ : (CLASS)))
+
+/* Fix for emit_group_load (): force large constants to be pushed via regs. */
+#define ALWAYS_PUSH_CONSTS_USING_REGS_P 1
+
+/* Darwin word-aligns FP doubles but doubleword-aligns 64-bit ints. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \
+ ? get_inner_array_type (FIELD) \
+ : TREE_TYPE (FIELD)) == DFmode \
+ ? MIN ((COMPUTED), 32) : (COMPUTED))
+
+/* Darwin increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TYPE_FIELDS (STRUCT) != 0 \
+ && DECL_MODE (TYPE_FIELDS (STRUCT)) == DFmode \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 64) \
+ : MAX ((COMPUTED), (SPECIFIED)))
+/* XXX: Darwin supports neither .quad nor .llong, but it also doesn't
+ support 64 bit powerpc either, so this just keeps things happy. */
+#define DOUBLE_INT_ASM_OP "\t.quad\t"
+
+/* Get HOST_WIDE_INT and CONST_INT to be 32 bits, for compile time
+ space/speed. */
+#undef MAX_LONG_TYPE_SIZE
+#define MAX_LONG_TYPE_SIZE 32
diff --git a/contrib/gcc/config/rs6000/eabi-ci.asm b/contrib/gcc/config/rs6000/eabi-ci.asm
new file mode 100644
index 0000000..64ffbfc
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabi-ci.asm
@@ -0,0 +1,124 @@
+/* crti.s for eabi
+ Copyright (C) 1996, 2000 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+ As a special exception, if you link this library with files
+ compiled with GCC to produce an executable, this does not cause
+ the resulting executable to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License.
+
+ */
+
+/* This file just supplies labeled starting points for the .got* and other
+ special sections. It is linked in first before other modules. */
+
+ .file "crti.s"
+ .ident "GNU C crti.s"
+
+#include <ppc-asm.h>
+
+ .section ".got","aw"
+ .globl __GOT_START__
+ .type __GOT_START__,@object
+__GOT_START__:
+
+ .section ".got1","aw"
+ .globl __GOT1_START__
+ .type __GOT1_START__,@object
+__GOT1_START__:
+
+ .section ".got2","aw"
+ .globl __GOT2_START__
+ .type __GOT2_START__,@object
+__GOT2_START__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_START__
+ .type __FIXUP_START__,@object
+__FIXUP_START__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_START__
+ .type __SDATA_START__,@object
+ .weak _SDA_BASE_
+ .type _SDA_BASE_,@object
+__SDATA_START__:
+_SDA_BASE_:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_START__
+ .type __SBSS_START__,@object
+__SBSS_START__:
+
+ .section ".sdata2","a"
+ .weak _SDA2_BASE_
+ .type _SDA2_BASE_,@object
+ .globl __SDATA2_START__
+ .type __SDATA2_START__,@object
+__SDATA2_START__:
+_SDA2_BASE_:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_START__
+ .type __SBSS2_START__,@object
+__SBSS2_START__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_START__
+ .type __EXCEPT_START__,@object
+__EXCEPT_START__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_BEGIN__
+ .type __EH_FRAME_BEGIN__,@object
+__EH_FRAME_BEGIN__:
+
+/* Head of __init function used for static constructors. */
+ .section ".init","ax"
+ .align 2
+FUNC_START(__init)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
+
+/* Head of __fini function used for static destructors. */
+ .section ".fini","ax"
+ .align 2
+FUNC_START(__fini)
+ stwu 1,-16(1)
+ mflr 0
+ stw 0,20(1)
diff --git a/contrib/gcc/config/rs6000/eabi-cn.asm b/contrib/gcc/config/rs6000/eabi-cn.asm
new file mode 100644
index 0000000..4a01dc6
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabi-cn.asm
@@ -0,0 +1,115 @@
+/* crtn.s for eabi
+ Copyright (C) 1996, 2000 Free Software Foundation, Inc.
+ Written By Michael Meissner
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 2, or (at your option) any
+later version.
+
+In addition to the permissions in the GNU General Public License, the
+Free Software Foundation gives you unlimited permission to link the
+compiled version of this file with other programs, and to distribute
+those programs without any restriction coming from the use of this
+file. (The General Public License restrictions do apply in other
+respects; for example, they cover modification of the file, and
+distribution when not linked into another program.)
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+ As a special exception, if you link this library with files
+ compiled with GCC to produce an executable, this does not cause
+ the resulting executable to be covered by the GNU General Public License.
+ This exception does not however invalidate any other reasons why
+ the executable file might be covered by the GNU General Public License.
+
+ */
+
+/* This file just supplies labeled ending points for the .got* and other
+ special sections. It is linked in last after other modules. */
+
+ .file "crtn.s"
+ .ident "GNU C crtn.s"
+
+ .section ".got","aw"
+ .globl __GOT_END__
+ .type __GOT_END__,@object
+__GOT_END__:
+
+ .section ".got1","aw"
+ .globl __GOT1_END__
+ .type __GOT1_END__,@object
+__GOT1_END__:
+
+ .section ".got2","aw"
+ .globl __GOT2_END__
+ .type __GOT2_END__,@object
+__GOT2_END__:
+
+ .section ".fixup","aw"
+ .globl __FIXUP_END__
+ .type __FIXUP_END__,@object
+__FIXUP_END__:
+
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+ .section ".dtors","aw"
+ .globl __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".sdata","aw"
+ .globl __SDATA_END__
+ .type __SDATA_END__,@object
+__SDATA_END__:
+
+ .section ".sbss","aw",@nobits
+ .globl __SBSS_END__
+ .type __SBSS_END__,@object
+__SBSS_END__:
+
+ .section ".sdata2","a"
+ .globl __SDATA2_END__
+ .type __SDATA2_END__,@object
+__SDATA2_END__:
+
+ .section ".sbss2","a"
+ .globl __SBSS2_END__
+ .type __SBSS2_END__,@object
+__SBSS2_END__:
+
+ .section ".gcc_except_table","aw"
+ .globl __EXCEPT_END__
+ .type __EXCEPT_END__,@object
+__EXCEPT_END__:
+
+ .section ".eh_frame","aw"
+ .globl __EH_FRAME_END__
+ .type __EH_FRAME_END__,@object
+__EH_FRAME_END__:
+ .long 0
+
+/* Tail of __init function used for static constructors. */
+ .section ".init","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
+
+/* Tail of __fini function used for static destructors. */
+ .section ".fini","ax"
+ lwz 0,20(1)
+ mtlr 0
+ addi 1,1,16
+ blr
diff --git a/contrib/gcc/config/rs6000/eabi.asm b/contrib/gcc/config/rs6000/eabi.asm
new file mode 100644
index 0000000..85f2e1b
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabi.asm
@@ -0,0 +1,661 @@
+/*
+ * Special support for eabi and SVR4
+ *
+ * Copyright (C) 1995, 1996, 1998, 2000, 2001 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ * 64-bit support written by David Edelsohn
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+/* Do any initializations needed for the eabi environment */
+
+ .file "eabi.asm"
+ .section ".text"
+ #include "ppc-asm.h"
+
+#ifndef __powerpc64__
+
+ .section ".got2","aw"
+ .align 2
+.LCTOC1 = . /* +32768 */
+
+/* Table of addresses */
+.Ltable = .-.LCTOC1
+ .long .LCTOC1 /* address we are really at */
+
+.Lsda = .-.LCTOC1
+ .long _SDA_BASE_ /* address of the first small data area */
+
+.Lsdas = .-.LCTOC1
+ .long __SDATA_START__ /* start of .sdata/.sbss section */
+
+.Lsdae = .-.LCTOC1
+ .long __SBSS_END__ /* end of .sdata/.sbss section */
+
+.Lsda2 = .-.LCTOC1
+ .long _SDA2_BASE_ /* address of the second small data area */
+
+.Lsda2s = .-.LCTOC1
+ .long __SDATA2_START__ /* start of .sdata2/.sbss2 section */
+
+.Lsda2e = .-.LCTOC1
+ .long __SBSS2_END__ /* end of .sdata2/.sbss2 section */
+
+#ifdef _RELOCATABLE
+.Lgots = .-.LCTOC1
+ .long __GOT_START__ /* Global offset table start */
+
+.Lgotm1 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_-4 /* end of GOT ptrs before BLCL + 3 reserved words */
+
+.Lgotm2 = .-.LCTOC1
+ .long _GLOBAL_OFFSET_TABLE_+12 /* start of GOT ptrs after BLCL + 3 reserved words */
+
+.Lgote = .-.LCTOC1
+ .long __GOT_END__ /* Global offset table end */
+
+.Lgot2s = .-.LCTOC1
+ .long __GOT2_START__ /* -mrelocatable GOT pointers start */
+
+.Lgot2e = .-.LCTOC1
+ .long __GOT2_END__ /* -mrelocatable GOT pointers end */
+
+.Lfixups = .-.LCTOC1
+ .long __FIXUP_START__ /* start of .fixup section */
+
+.Lfixupe = .-.LCTOC1
+ .long __FIXUP_END__ /* end of .fixup section */
+
+.Lctors = .-.LCTOC1
+ .long __CTOR_LIST__ /* start of .ctor section */
+
+.Lctore = .-.LCTOC1
+ .long __CTOR_END__ /* end of .ctor section */
+
+.Ldtors = .-.LCTOC1
+ .long __DTOR_LIST__ /* start of .dtor section */
+
+.Ldtore = .-.LCTOC1
+ .long __DTOR_END__ /* end of .dtor section */
+
+.Lexcepts = .-.LCTOC1
+ .long __EXCEPT_START__ /* start of .gcc_except_table section */
+
+.Lexcepte = .-.LCTOC1
+ .long __EXCEPT_END__ /* end of .gcc_except_table section */
+
+.Linit = .-.LCTOC1
+ .long .Linit_p /* address of variable to say we've been called */
+
+ .text
+ .align 2
+.Lptr:
+ .long .LCTOC1-.Laddr /* PC relative pointer to .got2 */
+#endif
+
+ .data
+ .align 2
+.Linit_p:
+ .long 0
+
+ .text
+
+FUNC_START(__eabi)
+
+/* Eliminate -mrelocatable code if not -mrelocatable, so that this file can
+ be assembled with other assemblers than GAS. */
+
+#ifndef _RELOCATABLE
+ addis 10,0,.Linit_p@ha /* init flag */
+ addis 11,0,.LCTOC1@ha /* load address of .LCTOC1 */
+ lwz 9,.Linit_p@l(10) /* init flag */
+ addi 11,11,.LCTOC1@l
+ cmplwi 2,9,0 /* init flag != 0? */
+ bnelr 2 /* return now, if we've been called already */
+ stw 1,.Linit_p@l(10) /* store a non-zero value in the done flag */
+
+#else /* -mrelocatable */
+ mflr 0
+ bl .Laddr /* get current address */
+.Laddr:
+ mflr 12 /* real address of .Laddr */
+ lwz 11,(.Lptr-.Laddr)(12) /* linker generated address of .LCTOC1 */
+ add 11,11,12 /* correct to real pointer */
+ lwz 12,.Ltable(11) /* get linker's idea of where .Laddr is */
+ lwz 10,.Linit(11) /* address of init flag */
+ subf. 12,12,11 /* calculate difference */
+ lwzx 9,10,12 /* done flag */
+ cmplwi 2,9,0 /* init flag != 0? */
+ mtlr 0 /* restore in case branch was taken */
+ bnelr 2 /* return now, if we've been called already */
+ stwx 1,10,12 /* store a non-zero value in the done flag */
+ beq+ 0,.Lsdata /* skip if we don't need to relocate */
+
+/* We need to relocate the .got2 pointers. */
+
+ lwz 3,.Lgot2s(11) /* GOT2 pointers start */
+ lwz 4,.Lgot2e(11) /* GOT2 pointers end */
+ add 3,12,3 /* adjust pointers */
+ add 4,12,4
+ bl FUNC_NAME(__eabi_convert) /* convert pointers in .got2 section */
+
+/* Fixup the .ctor section for static constructors */
+
+ lwz 3,.Lctors(11) /* constructors pointers start */
+ lwz 4,.Lctore(11) /* constructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert constructors */
+
+/* Fixup the .dtor section for static destructors */
+
+ lwz 3,.Ldtors(11) /* destructors pointers start */
+ lwz 4,.Ldtore(11) /* destructors pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert destructors */
+
+/* Fixup the .gcc_except_table section for G++ exceptions */
+
+ lwz 3,.Lexcepts(11) /* exception table pointers start */
+ lwz 4,.Lexcepte(11) /* exception table pointers end */
+ bl FUNC_NAME(__eabi_convert) /* convert exceptions */
+
+/* Fixup the addresses in the GOT below _GLOBAL_OFFSET_TABLE_-4 */
+
+ lwz 3,.Lgots(11) /* GOT table pointers start */
+	lwz 4,.Lgotm1(11)		/* GOT table pointers below _GLOBAL_OFFSET_TABLE_-4 */
+ bl FUNC_NAME(__eabi_convert) /* convert lower GOT */
+
+/* Fixup the addresses in the GOT above _GLOBAL_OFFSET_TABLE_+12 */
+
+	lwz 3,.Lgotm2(11)		/* GOT table pointers above _GLOBAL_OFFSET_TABLE_+12 */
+ lwz 4,.Lgote(11) /* GOT table pointers end */
+	bl	FUNC_NAME(__eabi_convert)	/* convert upper GOT */
+
+/* Fixup any user initialized pointers now (the compiler drops pointers to */
+/* each of the relocs that it does in the .fixup section). */
+
+.Lfix:
+ lwz 3,.Lfixups(11) /* fixup pointers start */
+ lwz 4,.Lfixupe(11) /* fixup pointers end */
+ bl FUNC_NAME(__eabi_uconvert) /* convert user initialized pointers */
+
+.Lsdata:
+ mtlr 0 /* restore link register */
+#endif /* _RELOCATABLE */
+
+/* Only load up register 13 if there is a .sdata and/or .sbss section */
+ lwz 3,.Lsdas(11) /* start of .sdata/.sbss section */
+ lwz 4,.Lsdae(11) /* end of .sdata/.sbss section */
+ cmpw 1,3,4 /* .sdata/.sbss section non-empty? */
+ beq- 1,.Lsda2l /* skip loading r13 */
+
+ lwz 13,.Lsda(11) /* load r13 with _SDA_BASE_ address */
+
+/* Only load up register 2 if there is a .sdata2 and/or .sbss2 section */
+
+.Lsda2l:
+	lwz 3,.Lsda2s(11)		/* start of .sdata2/.sbss2 section */
+	lwz 4,.Lsda2e(11)		/* end of .sdata2/.sbss2 section */
+	cmpw 1,3,4			/* .sdata2/.sbss2 section non-empty? */
+ beq+ 1,.Ldone /* skip loading r2 */
+
+ lwz 2,.Lsda2(11) /* load r2 with _SDA2_BASE_ address */
+
+/* Done adjusting pointers, return by way of doing the C++ global constructors. */
+
+.Ldone:
+ b FUNC_NAME(__init) /* do any C++ global constructors (which returns to caller) */
+FUNC_END(__eabi)
+
+/* Special subroutine to convert a bunch of pointers directly.
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+
+FUNC_START(__eabi_convert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lcvt:
+ lwzu 6,4(3) /* pointer to convert */
+ cmpi 0,6,0
+ beq- .Lcvt2 /* if pointer is null, don't convert */
+
+ add 6,6,12 /* convert pointer */
+ stw 6,0(3)
+.Lcvt2:
+ bdnz+ .Lcvt
+ blr
+
+FUNC_END(__eabi_convert)
+
+/* Special subroutine to convert the pointers the user has initialized. The
+ compiler has placed the address of the initialized pointer into the .fixup
+ section.
+
+ r0 has original link register
+ r3 has low pointer to convert
+ r4 has high pointer to convert
+ r5 .. r10 are scratch registers
+ r11 has the address of .LCTOC1 in it.
+ r12 has the value to add to each pointer
+ r13 .. r31 are unchanged */
+
+FUNC_START(__eabi_uconvert)
+ cmplw 1,3,4 /* any pointers to convert? */
+ subf 5,3,4 /* calculate number of words to convert */
+ bclr 4,4 /* return if no pointers */
+
+ srawi 5,5,2
+ addi 3,3,-4 /* start-4 for use with lwzu */
+ mtctr 5
+
+.Lucvt:
+ lwzu 6,4(3) /* next pointer to pointer to convert */
+ add 6,6,12 /* adjust pointer */
+ lwz 7,0(6) /* get the pointer it points to */
+ stw 6,0(3) /* store adjusted pointer */
+ add 7,7,12 /* adjust */
+ stw 7,0(6)
+ bdnz+ .Lucvt
+ blr
+
+FUNC_END(__eabi_uconvert)
+
+/* Routines for saving floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+FUNC_START(_savefpr_14) stfd 14,-144(11) /* save fp registers */
+FUNC_START(_savefpr_15) stfd 15,-136(11)
+FUNC_START(_savefpr_16) stfd 16,-128(11)
+FUNC_START(_savefpr_17) stfd 17,-120(11)
+FUNC_START(_savefpr_18) stfd 18,-112(11)
+FUNC_START(_savefpr_19) stfd 19,-104(11)
+FUNC_START(_savefpr_20) stfd 20,-96(11)
+FUNC_START(_savefpr_21) stfd 21,-88(11)
+FUNC_START(_savefpr_22) stfd 22,-80(11)
+FUNC_START(_savefpr_23) stfd 23,-72(11)
+FUNC_START(_savefpr_24) stfd 24,-64(11)
+FUNC_START(_savefpr_25) stfd 25,-56(11)
+FUNC_START(_savefpr_26) stfd 26,-48(11)
+FUNC_START(_savefpr_27) stfd 27,-40(11)
+FUNC_START(_savefpr_28) stfd 28,-32(11)
+FUNC_START(_savefpr_29) stfd 29,-24(11)
+FUNC_START(_savefpr_30) stfd 30,-16(11)
+FUNC_START(_savefpr_31) stfd 31,-8(11)
+ blr
+FUNC_END(_savefpr_31)
+FUNC_END(_savefpr_30)
+FUNC_END(_savefpr_29)
+FUNC_END(_savefpr_28)
+FUNC_END(_savefpr_27)
+FUNC_END(_savefpr_26)
+FUNC_END(_savefpr_25)
+FUNC_END(_savefpr_24)
+FUNC_END(_savefpr_23)
+FUNC_END(_savefpr_22)
+FUNC_END(_savefpr_21)
+FUNC_END(_savefpr_20)
+FUNC_END(_savefpr_19)
+FUNC_END(_savefpr_18)
+FUNC_END(_savefpr_17)
+FUNC_END(_savefpr_16)
+FUNC_END(_savefpr_15)
+FUNC_END(_savefpr_14)
+
+/* Routines for saving integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer save area. */
+
+FUNC_START(_savegpr_14) stw 14,-72(11) /* save gp registers */
+FUNC_START(_savegpr_15) stw 15,-68(11)
+FUNC_START(_savegpr_16) stw 16,-64(11)
+FUNC_START(_savegpr_17) stw 17,-60(11)
+FUNC_START(_savegpr_18) stw 18,-56(11)
+FUNC_START(_savegpr_19) stw 19,-52(11)
+FUNC_START(_savegpr_20) stw 20,-48(11)
+FUNC_START(_savegpr_21) stw 21,-44(11)
+FUNC_START(_savegpr_22) stw 22,-40(11)
+FUNC_START(_savegpr_23) stw 23,-36(11)
+FUNC_START(_savegpr_24) stw 24,-32(11)
+FUNC_START(_savegpr_25) stw 25,-28(11)
+FUNC_START(_savegpr_26) stw 26,-24(11)
+FUNC_START(_savegpr_27) stw 27,-20(11)
+FUNC_START(_savegpr_28) stw 28,-16(11)
+FUNC_START(_savegpr_29) stw 29,-12(11)
+FUNC_START(_savegpr_30) stw 30,-8(11)
+FUNC_START(_savegpr_31) stw 31,-4(11)
+ blr
+FUNC_END(_savegpr_31)
+FUNC_END(_savegpr_30)
+FUNC_END(_savegpr_29)
+FUNC_END(_savegpr_28)
+FUNC_END(_savegpr_27)
+FUNC_END(_savegpr_26)
+FUNC_END(_savegpr_25)
+FUNC_END(_savegpr_24)
+FUNC_END(_savegpr_23)
+FUNC_END(_savegpr_22)
+FUNC_END(_savegpr_21)
+FUNC_END(_savegpr_20)
+FUNC_END(_savegpr_19)
+FUNC_END(_savegpr_18)
+FUNC_END(_savegpr_17)
+FUNC_END(_savegpr_16)
+FUNC_END(_savegpr_15)
+FUNC_END(_savegpr_14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+
+FUNC_START(_restfpr_14) lfd 14,-144(11) /* restore fp registers */
+FUNC_START(_restfpr_15) lfd 15,-136(11)
+FUNC_START(_restfpr_16) lfd 16,-128(11)
+FUNC_START(_restfpr_17) lfd 17,-120(11)
+FUNC_START(_restfpr_18) lfd 18,-112(11)
+FUNC_START(_restfpr_19) lfd 19,-104(11)
+FUNC_START(_restfpr_20) lfd 20,-96(11)
+FUNC_START(_restfpr_21) lfd 21,-88(11)
+FUNC_START(_restfpr_22) lfd 22,-80(11)
+FUNC_START(_restfpr_23) lfd 23,-72(11)
+FUNC_START(_restfpr_24) lfd 24,-64(11)
+FUNC_START(_restfpr_25) lfd 25,-56(11)
+FUNC_START(_restfpr_26) lfd 26,-48(11)
+FUNC_START(_restfpr_27) lfd 27,-40(11)
+FUNC_START(_restfpr_28) lfd 28,-32(11)
+FUNC_START(_restfpr_29) lfd 29,-24(11)
+FUNC_START(_restfpr_30) lfd 30,-16(11)
+FUNC_START(_restfpr_31) lfd 31,-8(11)
+ blr
+FUNC_END(_restfpr_31)
+FUNC_END(_restfpr_30)
+FUNC_END(_restfpr_29)
+FUNC_END(_restfpr_28)
+FUNC_END(_restfpr_27)
+FUNC_END(_restfpr_26)
+FUNC_END(_restfpr_25)
+FUNC_END(_restfpr_24)
+FUNC_END(_restfpr_23)
+FUNC_END(_restfpr_22)
+FUNC_END(_restfpr_21)
+FUNC_END(_restfpr_20)
+FUNC_END(_restfpr_19)
+FUNC_END(_restfpr_18)
+FUNC_END(_restfpr_17)
+FUNC_END(_restfpr_16)
+FUNC_END(_restfpr_15)
+FUNC_END(_restfpr_14)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+FUNC_START(_restgpr_14) lwz 14,-72(11) /* restore gp registers */
+FUNC_START(_restgpr_15) lwz 15,-68(11)
+FUNC_START(_restgpr_16) lwz 16,-64(11)
+FUNC_START(_restgpr_17) lwz 17,-60(11)
+FUNC_START(_restgpr_18) lwz 18,-56(11)
+FUNC_START(_restgpr_19) lwz 19,-52(11)
+FUNC_START(_restgpr_20) lwz 20,-48(11)
+FUNC_START(_restgpr_21) lwz 21,-44(11)
+FUNC_START(_restgpr_22) lwz 22,-40(11)
+FUNC_START(_restgpr_23) lwz 23,-36(11)
+FUNC_START(_restgpr_24) lwz 24,-32(11)
+FUNC_START(_restgpr_25) lwz 25,-28(11)
+FUNC_START(_restgpr_26) lwz 26,-24(11)
+FUNC_START(_restgpr_27) lwz 27,-20(11)
+FUNC_START(_restgpr_28) lwz 28,-16(11)
+FUNC_START(_restgpr_29) lwz 29,-12(11)
+FUNC_START(_restgpr_30) lwz 30,-8(11)
+FUNC_START(_restgpr_31) lwz 31,-4(11)
+ blr
+FUNC_END(_restgpr_31)
+FUNC_END(_restgpr_30)
+FUNC_END(_restgpr_29)
+FUNC_END(_restgpr_28)
+FUNC_END(_restgpr_27)
+FUNC_END(_restgpr_26)
+FUNC_END(_restgpr_25)
+FUNC_END(_restgpr_24)
+FUNC_END(_restgpr_23)
+FUNC_END(_restgpr_22)
+FUNC_END(_restgpr_21)
+FUNC_END(_restgpr_20)
+FUNC_END(_restgpr_19)
+FUNC_END(_restgpr_18)
+FUNC_END(_restgpr_17)
+FUNC_END(_restgpr_16)
+FUNC_END(_restgpr_15)
+FUNC_END(_restgpr_14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the floating point save area. */
+/* In addition to restoring the fp registers, it will return to the caller's */
+/* caller */
+
+FUNC_START(_restfpr_14_x) lfd 14,-144(11) /* restore fp registers */
+FUNC_START(_restfpr_15_x) lfd 15,-136(11)
+FUNC_START(_restfpr_16_x) lfd 16,-128(11)
+FUNC_START(_restfpr_17_x) lfd 17,-120(11)
+FUNC_START(_restfpr_18_x) lfd 18,-112(11)
+FUNC_START(_restfpr_19_x) lfd 19,-104(11)
+FUNC_START(_restfpr_20_x) lfd 20,-96(11)
+FUNC_START(_restfpr_21_x) lfd 21,-88(11)
+FUNC_START(_restfpr_22_x) lfd 22,-80(11)
+FUNC_START(_restfpr_23_x) lfd 23,-72(11)
+FUNC_START(_restfpr_24_x) lfd 24,-64(11)
+FUNC_START(_restfpr_25_x) lfd 25,-56(11)
+FUNC_START(_restfpr_26_x) lfd 26,-48(11)
+FUNC_START(_restfpr_27_x) lfd 27,-40(11)
+FUNC_START(_restfpr_28_x) lfd 28,-32(11)
+FUNC_START(_restfpr_29_x) lfd 29,-24(11)
+FUNC_START(_restfpr_30_x) lfd 30,-16(11)
+FUNC_START(_restfpr_31_x) lwz 0,4(11)
+ lfd 31,-8(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restfpr_31_x)
+FUNC_END(_restfpr_30_x)
+FUNC_END(_restfpr_29_x)
+FUNC_END(_restfpr_28_x)
+FUNC_END(_restfpr_27_x)
+FUNC_END(_restfpr_26_x)
+FUNC_END(_restfpr_25_x)
+FUNC_END(_restfpr_24_x)
+FUNC_END(_restfpr_23_x)
+FUNC_END(_restfpr_22_x)
+FUNC_END(_restfpr_21_x)
+FUNC_END(_restfpr_20_x)
+FUNC_END(_restfpr_19_x)
+FUNC_END(_restfpr_18_x)
+FUNC_END(_restfpr_17_x)
+FUNC_END(_restfpr_16_x)
+FUNC_END(_restfpr_15_x)
+FUNC_END(_restfpr_14_x)
+
+/* Routines for restoring integer registers, called by the compiler. */
+/* Called with r11 pointing to the stack header word of the caller of the */
+/* function, just beyond the end of the integer restore area. */
+
+FUNC_START(_restgpr_14_x) lwz 14,-72(11) /* restore gp registers */
+FUNC_START(_restgpr_15_x) lwz 15,-68(11)
+FUNC_START(_restgpr_16_x) lwz 16,-64(11)
+FUNC_START(_restgpr_17_x) lwz 17,-60(11)
+FUNC_START(_restgpr_18_x) lwz 18,-56(11)
+FUNC_START(_restgpr_19_x) lwz 19,-52(11)
+FUNC_START(_restgpr_20_x) lwz 20,-48(11)
+FUNC_START(_restgpr_21_x) lwz 21,-44(11)
+FUNC_START(_restgpr_22_x) lwz 22,-40(11)
+FUNC_START(_restgpr_23_x) lwz 23,-36(11)
+FUNC_START(_restgpr_24_x) lwz 24,-32(11)
+FUNC_START(_restgpr_25_x) lwz 25,-28(11)
+FUNC_START(_restgpr_26_x) lwz 26,-24(11)
+FUNC_START(_restgpr_27_x) lwz 27,-20(11)
+FUNC_START(_restgpr_28_x) lwz 28,-16(11)
+FUNC_START(_restgpr_29_x) lwz 29,-12(11)
+FUNC_START(_restgpr_30_x) lwz 30,-8(11)
+FUNC_START(_restgpr_31_x) lwz 0,4(11)
+ lwz 31,-4(11)
+ mtlr 0
+ mr 1,11
+ blr
+FUNC_END(_restgpr_31_x)
+FUNC_END(_restgpr_30_x)
+FUNC_END(_restgpr_29_x)
+FUNC_END(_restgpr_28_x)
+FUNC_END(_restgpr_27_x)
+FUNC_END(_restgpr_26_x)
+FUNC_END(_restgpr_25_x)
+FUNC_END(_restgpr_24_x)
+FUNC_END(_restgpr_23_x)
+FUNC_END(_restgpr_22_x)
+FUNC_END(_restgpr_21_x)
+FUNC_END(_restgpr_20_x)
+FUNC_END(_restgpr_19_x)
+FUNC_END(_restgpr_18_x)
+FUNC_END(_restgpr_17_x)
+FUNC_END(_restgpr_16_x)
+FUNC_END(_restgpr_15_x)
+FUNC_END(_restgpr_14_x)
+
+#else /* __powerpc64__ */
+
+ .section ".text"
+ .align 2
+
+/* Routines for saving floating point registers, called by the compiler. */
+
+.fsav:
+FUNC_START(_savef14) stfd 14,-144(1) /* save fp registers */
+FUNC_START(_savef15) stfd 15,-136(1)
+FUNC_START(_savef16) stfd 16,-128(1)
+FUNC_START(_savef17) stfd 17,-120(1)
+FUNC_START(_savef18) stfd 18,-112(1)
+FUNC_START(_savef19) stfd 19,-104(1)
+FUNC_START(_savef20) stfd 20,-96(1)
+FUNC_START(_savef21) stfd 21,-88(1)
+FUNC_START(_savef22) stfd 22,-80(1)
+FUNC_START(_savef23) stfd 23,-72(1)
+FUNC_START(_savef24) stfd 24,-64(1)
+FUNC_START(_savef25) stfd 25,-56(1)
+FUNC_START(_savef26) stfd 26,-48(1)
+FUNC_START(_savef27) stfd 27,-40(1)
+FUNC_START(_savef28) stfd 28,-32(1)
+FUNC_START(_savef29) stfd 29,-24(1)
+FUNC_START(_savef30) stfd 30,-16(1)
+FUNC_START(_savef31) stfd 31,-8(1)
+ blr
+.LTfsav:
+ .long 0
+ .byte 0,12,0,0,0,0,0,0
+ .long 0
+ .long .LTfsav-.fsav
+ .short 4
+ .ascii "fsav"
+FUNC_END(_savef31)
+FUNC_END(_savef30)
+FUNC_END(_savef29)
+FUNC_END(_savef28)
+FUNC_END(_savef27)
+FUNC_END(_savef26)
+FUNC_END(_savef25)
+FUNC_END(_savef24)
+FUNC_END(_savef23)
+FUNC_END(_savef22)
+FUNC_END(_savef21)
+FUNC_END(_savef20)
+FUNC_END(_savef19)
+FUNC_END(_savef18)
+FUNC_END(_savef17)
+FUNC_END(_savef16)
+FUNC_END(_savef15)
+FUNC_END(_savef14)
+
+/* Routines for restoring floating point registers, called by the compiler. */
+
+.fres:
+FUNC_START(_restf14) lfd 14,-144(1) /* restore fp registers */
+FUNC_START(_restf15) lfd 15,-136(1)
+FUNC_START(_restf16) lfd 16,-128(1)
+FUNC_START(_restf17) lfd 17,-120(1)
+FUNC_START(_restf18) lfd 18,-112(1)
+FUNC_START(_restf19) lfd 19,-104(1)
+FUNC_START(_restf20) lfd 20,-96(1)
+FUNC_START(_restf21) lfd 21,-88(1)
+FUNC_START(_restf22) lfd 22,-80(1)
+FUNC_START(_restf23) lfd 23,-72(1)
+FUNC_START(_restf24) lfd 24,-64(1)
+FUNC_START(_restf25) lfd 25,-56(1)
+FUNC_START(_restf26) lfd 26,-48(1)
+FUNC_START(_restf27) lfd 27,-40(1)
+FUNC_START(_restf28) lfd 28,-32(1)
+FUNC_START(_restf29) lfd 29,-24(1)
+FUNC_START(_restf30) lfd 30,-16(1)
+FUNC_START(_restf31) lfd 31,-8(1)
+ blr
+.LTfres:
+ .long 0
+ .byte 0,12,0,0,0,0,0,0
+ .long 0
+ .long .LTfres-.fres
+ .short 4
+ .ascii "fres"
+FUNC_END(_restf31)
+FUNC_END(_restf30)
+FUNC_END(_restf29)
+FUNC_END(_restf28)
+FUNC_END(_restf27)
+FUNC_END(_restf26)
+FUNC_END(_restf25)
+FUNC_END(_restf24)
+FUNC_END(_restf23)
+FUNC_END(_restf22)
+FUNC_END(_restf21)
+FUNC_END(_restf20)
+FUNC_END(_restf19)
+FUNC_END(_restf18)
+FUNC_END(_restf17)
+FUNC_END(_restf16)
+FUNC_END(_restf15)
+FUNC_END(_restf14)
+
+#endif
diff --git a/contrib/gcc/config/rs6000/eabi.h b/contrib/gcc/config/rs6000/eabi.h
new file mode 100644
index 0000000..88fb6f8
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabi.h
@@ -0,0 +1,36 @@
+/* Core target definitions for GNU compiler
+ for IBM RS/6000 PowerPC targeted to embedded ELF systems.
+ Copyright (C) 1995, 1996, 2000 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Add -meabi to target flags */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI)
+
+/* Invoke an initializer function to set up the GOT */
+#define NAME__MAIN "__eabi"
+#define INVOKE__main
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded)");
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DPPC -D__embedded__ -Asystem=embedded -Acpu=powerpc -Amachine=powerpc"
diff --git a/contrib/gcc/config/rs6000/eabiaix.h b/contrib/gcc/config/rs6000/eabiaix.h
new file mode 100644
index 0000000..639a335
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabiaix.h
@@ -0,0 +1,39 @@
+/* Embedded ELF system support, using old AIX based calling sequence.
+ Copyright (C) 1995, 1996, 2000 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Default ABI to use */
+#undef RS6000_ABI_NAME
+#define RS6000_ABI_NAME "aix"
+
+#undef CPP_SYSV_DEFAULT_SPEC
+#define CPP_SYSV_DEFAULT_SPEC "-D_CALL_AIX"
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mbig", "mcall-aix" }
diff --git a/contrib/gcc/config/rs6000/eabialtivec.h b/contrib/gcc/config/rs6000/eabialtivec.h
new file mode 100644
index 0000000..23ec8c9
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabialtivec.h
@@ -0,0 +1,31 @@
+/* Core target definitions for GNU compiler
+ for PowerPC targeted systems with AltiVec support.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Add -meabi and -maltivec to target flags. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_EABI | MASK_ALTIVEC)
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Embedded with AltiVec)");
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1
diff --git a/contrib/gcc/config/rs6000/eabisim.h b/contrib/gcc/config/rs6000/eabisim.h
new file mode 100644
index 0000000..92e0957
--- /dev/null
+++ b/contrib/gcc/config/rs6000/eabisim.h
@@ -0,0 +1,44 @@
+/* Support for GCC on simulated PowerPC systems targeted to embedded ELF
+ systems.
+ Copyright (C) 1995, 1996, 2000 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC Simulated)");
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DPPC -D__embedded__ -D__simulator__ -Asystem=embedded -Asystem=simulator -Acpu=powerpc -Amachine=powerpc"
+
+/* Make the simulator the default */
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_sim)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_sim)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_sim)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_sim)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_sim)"
diff --git a/contrib/gcc/config/rs6000/freebsd.h b/contrib/gcc/config/rs6000/freebsd.h
new file mode 100644
index 0000000..e4b9fc5
--- /dev/null
+++ b/contrib/gcc/config/rs6000/freebsd.h
@@ -0,0 +1,66 @@
+/* Definitions for PowerPC running FreeBSD using the ELF format
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by David E. O'Brien <obrien@FreeBSD.org> and BSDi.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* Override the defaults, which exist to force the proper definition. */
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_freebsd)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_freebsd)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_freebsd)"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_freebsd)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_freebsd)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_freebsd)"
+
+
+/************************[ Target stuff ]***********************************/
+
+/* Define the actual types of some ANSI-mandated types.
+ Needs to agree with <machine/ansi.h>. GCC defaults come from c-decl.c,
+ c-common.c, and config/<arch>/<arch>.h. */
+
+/* rs6000.h gets this wrong for FreeBSD. We use the GCC defaults instead. */
+#undef WCHAR_TYPE
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (FreeBSD/PowerPC ELF)");
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
diff --git a/contrib/gcc/config/rs6000/linux.h b/contrib/gcc/config/rs6000/linux.h
new file mode 100644
index 0000000..99c0453
--- /dev/null
+++ b/contrib/gcc/config/rs6000/linux.h
@@ -0,0 +1,130 @@
+/* Definitions of target machine for GNU compiler,
+ for powerpc machines running Linux.
+ Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001 Free Software Foundation,
+ Inc.
+ Contributed by Michael Meissner (meissner@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DPPC -D__ELF__ -Dpowerpc -Acpu=powerpc -Amachine=powerpc"
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)"
+
+/* The GNU C++ standard library currently requires _GNU_SOURCE being
+ defined on glibc-based systems. This temporary hack accomplishes this,
+ it should go away as soon as libstdc++-v3 has a real fix. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_linux)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_linux)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC GNU/Linux)");
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* For backward compatibility, we must continue to use the AIX
+ structure return convention. */
+#undef DRAFT_V4_STRUCT_RET
+#define DRAFT_V4_STRUCT_RET 1
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#ifdef IN_LIBGCC2
+#include <signal.h>
+#include <sys/ucontext.h>
+
+enum { SIGNAL_FRAMESIZE = 64 };
+#endif
+
+#define MD_FALLBACK_FRAME_STATE_FOR(CONTEXT, FS, SUCCESS) \
+ do { \
+ unsigned char *pc_ = (CONTEXT)->ra; \
+ struct sigcontext *sc_; \
+ long new_cfa_; \
+ int i_; \
+ \
+ /* li r0, 0x7777; sc (rt_sigreturn) */ \
+ /* li r0, 0x6666; sc (sigreturn) */ \
+ if (((*(unsigned int *) (pc_+0) == 0x38007777) \
+ || (*(unsigned int *) (pc_+0) == 0x38006666)) \
+ && (*(unsigned int *) (pc_+4) == 0x44000002)) \
+ sc_ = (CONTEXT)->cfa + SIGNAL_FRAMESIZE; \
+ else \
+ break; \
+ \
+ new_cfa_ = sc_->regs->gpr[STACK_POINTER_REGNUM]; \
+ (FS)->cfa_how = CFA_REG_OFFSET; \
+ (FS)->cfa_reg = STACK_POINTER_REGNUM; \
+ (FS)->cfa_offset = new_cfa_ - (long) (CONTEXT)->cfa; \
+ \
+ for (i_ = 0; i_ < 32; i_++) \
+ if (i_ != STACK_POINTER_REGNUM) \
+ { \
+ (FS)->regs.reg[i_].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[i_].loc.offset \
+ = (long)&(sc_->regs->gpr[i_]) - new_cfa_; \
+ } \
+ \
+ (FS)->regs.reg[LINK_REGISTER_REGNUM].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[LINK_REGISTER_REGNUM].loc.offset \
+ = (long)&(sc_->regs->link) - new_cfa_; \
+ \
+ /* The unwinder expects the IP to point to the following insn, \
+ whereas the kernel returns the address of the actual \
+ faulting insn. */ \
+ sc_->regs->nip += 4; \
+ (FS)->regs.reg[CR0_REGNO].how = REG_SAVED_OFFSET; \
+ (FS)->regs.reg[CR0_REGNO].loc.offset \
+ = (long)&(sc_->regs->nip) - new_cfa_; \
+ (FS)->retaddr_column = CR0_REGNO; \
+ goto SUCCESS; \
+ } while (0)
+
diff --git a/contrib/gcc/config/rs6000/linux64.h b/contrib/gcc/config/rs6000/linux64.h
new file mode 100644
index 0000000..d014afe
--- /dev/null
+++ b/contrib/gcc/config/rs6000/linux64.h
@@ -0,0 +1,298 @@
+/* Definitions of target machine for GNU compiler,
+ for 64 bit powerpc linux.
+ Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Yes! We are AIX! Err. Wait. We're Linux! No, wait, we're a
+ combo of both!*/
+#undef DEFAULT_ABI
+#define DEFAULT_ABI ABI_AIX
+
+#undef TARGET_AIX
+#define TARGET_AIX 1
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_POWERPC | MASK_POWERPC64 | MASK_64BIT | MASK_NEW_MNEMONICS)
+
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_PPC64"
+
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc64"
+
+/* 64-bit PowerPC Linux always has a TOC. */
+#undef TARGET_NO_TOC
+#define TARGET_NO_TOC 0
+#undef TARGET_TOC
+#define TARGET_TOC 1
+
+/* We use glibc _mcount for profiling. */
+#define NO_PROFILE_COUNTERS 1
+#undef PROFILE_BEFORE_PROLOGUE
+
+/* Define this for kernel profiling, which just saves LR then calls
+ _mcount without worrying about arg saves. The idea is to change
+ the function prologue as little as possible as it isn't easy to
+ account for arg save/restore code added just for _mcount. */
+/* #define PROFILE_KERNEL 1 */
+#if PROFILE_KERNEL
+#define PROFILE_BEFORE_PROLOGUE 1
+#undef PROFILE_HOOK
+#else
+#define PROFILE_HOOK(LABEL) output_profile_hook (LABEL)
+#endif
+
+/* We don't need to generate entries in .fixup. */
+#undef RELOCATABLE_NEEDS_FIXUP
+
+#define USER_LABEL_PREFIX ""
+
+/* AIX word-aligns FP doubles but doubleword-aligns 64-bit ints. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ (TYPE_MODE (TREE_CODE (TREE_TYPE (FIELD)) == ARRAY_TYPE \
+ ? get_inner_array_type (FIELD) \
+ : TREE_TYPE (FIELD)) == DFmode \
+ ? MIN ((COMPUTED), 32) : (COMPUTED))
+
+/* AIX increases natural record alignment to doubleword if the first
+ field is an FP double while the FP fields remain word aligned. */
+#undef ROUND_TYPE_ALIGN
+#define ROUND_TYPE_ALIGN(STRUCT, COMPUTED, SPECIFIED) \
+ ((TREE_CODE (STRUCT) == RECORD_TYPE \
+ || TREE_CODE (STRUCT) == UNION_TYPE \
+ || TREE_CODE (STRUCT) == QUAL_UNION_TYPE) \
+ && TYPE_FIELDS (STRUCT) != 0 \
+ && DECL_MODE (TYPE_FIELDS (STRUCT)) == DFmode \
+ ? MAX (MAX ((COMPUTED), (SPECIFIED)), 64) \
+ : MAX ((COMPUTED), (SPECIFIED)))
+
+/* Indicate that jump tables go in the text section. */
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Define cutoff for using external functions to save floating point. */
+#undef FP_SAVE_INLINE
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) == 62 || (FIRST_REG) == 63)
+
+/* 64-bit PowerPC Linux always has GPR13 fixed. */
+#define FIXED_R13 1
+
+/* __throw will restore its own return address to be the same as the
+ return address of the function that the throw is being made to.
+ This is unfortunate, because we want to check the original
+ return address to see if we need to restore the TOC.
+ So we have to squirrel it away with this. */
+#define SETUP_FRAME_ADDRESSES() rs6000_aix_emit_builtin_unwind_init ()
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-D_PPC_ -D__PPC__ -D_PPC64_ -D__PPC64__ -D__powerpc__ -D__powerpc64__ \
+ -D_PIC_ -D__PIC__ -D_BIG_ENDIAN -D__BIG_ENDIAN__ -D__ELF__ \
+ -D__LONG_MAX__=9223372036854775807L \
+ -Acpu=powerpc64 -Amachine=powerpc64"
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_linux)"
+
+/* The GNU C++ standard library currently requires _GNU_SOURCE being
+ defined on glibc-based systems. This temporary hack accomplishes this,
+ it should go away as soon as libstdc++-v3 has a real fix. */
+#undef CPLUSPLUS_CPP_SPEC
+#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)"
+
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "%{shared:-shared} %{!shared: %{static:-static}}"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_linux)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_linux)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_linux)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_linux)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_linux)"
+
+#undef LINK_OS_LINUX_SPEC
+#define LINK_OS_LINUX_SPEC "-m elf64ppc %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+
+#undef TOC_SECTION_ASM_OP
+#define TOC_SECTION_ASM_OP "\t.section\t\".toc\",\"aw\""
+
+#undef MINIMAL_TOC_SECTION_ASM_OP
+#define MINIMAL_TOC_SECTION_ASM_OP "\t.section\t\".toc1\",\"aw\""
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC64 GNU/Linux)");
+
+/* Must be at least as big as our pointer type. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+/* Override rs6000.h definition. */
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* PowerPC no-op instruction. */
+#undef RS6000_CALL_GLUE
+#define RS6000_CALL_GLUE "nop"
+
+#undef RS6000_MCOUNT
+#define RS6000_MCOUNT "_mcount"
+
+/* FP save and restore routines. */
+#undef SAVE_FP_PREFIX
+#define SAVE_FP_PREFIX "._savef"
+#undef SAVE_FP_SUFFIX
+#define SAVE_FP_SUFFIX ""
+#undef RESTORE_FP_PREFIX
+#define RESTORE_FP_PREFIX "._restf"
+#undef RESTORE_FP_SUFFIX
+#define RESTORE_FP_SUFFIX ""
+
+/* Dwarf2 debugging. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* If we are referencing a function that is static or is known to be
+ in this file, make the SYMBOL_REF special. We can use this to indicate
+ that we can branch to this function without emitting a no-op after the
+ call. Do not set this flag if the function is weakly defined. */
+
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) \
+ if (TREE_CODE (DECL) == FUNCTION_DECL \
+ && (TREE_ASM_WRITTEN (DECL) || ! TREE_PUBLIC (DECL)) \
+ && ! DECL_WEAK (DECL)) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1;
+
+/* This macro gets just the user-specified name
+ out of the string in a SYMBOL_REF. Discard
+ a leading * or @. */
+#define STRIP_NAME_ENCODING(VAR,SYMBOL_NAME) \
+do { \
+ const char *_name = (SYMBOL_NAME); \
+ while (*_name == '*' || *_name == '@') \
+ _name++; \
+ (VAR) = _name; \
+} while (0)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+/* Override elfos.h definition. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+do { \
+ const char *_name = NAME; \
+ if (*_name == '@') \
+ _name++; \
+ \
+ if (*_name == '*') \
+ fprintf (FILE, "%s", _name + 1); \
+ else \
+ asm_fprintf (FILE, "%U%s", _name); \
+} while (0)
+
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", (FILE)); \
+ ASM_OUTPUT_LABEL ((FILE), (NAME)); \
+ fputs (DOUBLE_INT_ASM_OP, (FILE)); \
+ putc ('.', (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ fputs (DOUBLE_INT_ASM_OP, (FILE)); \
+ fputs (".TOC.@tocbase, 0\n\t.previous\n", (FILE)); \
+ \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+ if (DECL_WEAK (DECL)) \
+ fputs ("\t.weak\t", (FILE)); \
+ else \
+ fputs ("\t.globl\t", (FILE)); \
+ putc ('.', (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ } \
+ fputs (TYPE_ASM_OP, (FILE)); \
+ putc ('.', (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc (',', (FILE)); \
+ fprintf ((FILE), TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', (FILE)); \
+ ASM_DECLARE_RESULT ((FILE), DECL_RESULT (DECL)); \
+ putc ('.', (FILE)); \
+ ASM_OUTPUT_LABEL ((FILE), (NAME)); \
+ } \
+ while (0)
+
+/* Return non-zero if this entry is to be written into the constant
+ pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
+ or a CONST containing one of them. If -mfp-in-toc (the default),
+ we also do this for floating-point constants. We actually can only
+ do this if the FP formats of the target and host machines are the
+ same, but we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h. We also do this when
+ we can write the entry into the TOC and the entry is not larger
+ than a TOC entry. */
+
+#undef ASM_OUTPUT_SPECIAL_POOL_ENTRY_P
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (GET_CODE (X) == CONST_DOUBLE \
+ && (TARGET_POWERPC64 \
+ || TARGET_MINIMAL_TOC \
+ || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && ! TARGET_NO_FP_IN_TOC)))))
+
diff --git a/contrib/gcc/config/rs6000/linuxaltivec.h b/contrib/gcc/config/rs6000/linuxaltivec.h
new file mode 100644
index 0000000..d2f184f
--- /dev/null
+++ b/contrib/gcc/config/rs6000/linuxaltivec.h
@@ -0,0 +1,31 @@
+/* Definitions of target machine for GNU compiler,
+ for AltiVec enhanced PowerPC machines running GNU/Linux.
+ Copyright (C) 2001 Free Software Foundation, Inc.
+ Contributed by Aldy Hernandez (aldyh@redhat.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC AltiVec GNU/Linux)");
+
+/* Override rs6000.h and sysv4.h definition. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_ALTIVEC)
+
+#undef SUBSUBTARGET_OVERRIDE_OPTIONS
+#define SUBSUBTARGET_OVERRIDE_OPTIONS rs6000_altivec_abi = 1
diff --git a/contrib/gcc/config/rs6000/lynx.h b/contrib/gcc/config/rs6000/lynx.h
new file mode 100644
index 0000000..5a859ca
--- /dev/null
+++ b/contrib/gcc/config/rs6000/lynx.h
@@ -0,0 +1,100 @@
+/* Definitions for Rs6000 running LynxOS.
+ Copyright (C) 1995, 1996, 2000 Free Software Foundation, Inc.
+ Contributed by David Henkel-Wallace, Cygnus Support (gumby@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Definitions we want to override with those from rs6000.h: */
+#undef LIB_SPEC
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+#undef ASM_FILE_START
+#undef EXTRA_SECTIONS
+#undef READONLY_DATA_SECTION
+#undef EXTRA_SECTION_FUNCTIONS
+#undef SELECT_RTX_SECTION
+#undef SELECT_SECTION
+#undef USER_LABEL_PREFIX
+#undef ASM_OUTPUT_LABELREF
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#undef ASM_GENERATE_INTERNAL_LABEL
+#undef ASM_OUTPUT_COMMON
+#undef ASM_OUTPUT_LOCAL
+
+#undef SDB_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#undef PREFERRED_DEBUGGING_TYPE
+
+#undef FUNCTION_PROFILER
+
+#include <rs6000/rs6000.h>
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION fprintf (stderr, " (LynxOS-RS/6000)");
+
+/* LynxOS has signed chars, regardless of what most R/S 6000 systems do */
+#undef DEFAULT_SIGNED_CHAR
+#define DEFAULT_SIGNED_CHAR 1
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Acpu=rs6000 -Amachine=rs6000 -Asystem=lynx -Asystem=unix -DLynx -D_IBMR2 -Dunix -Drs6000 -Dlynx -DLYNX"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-T0x10001000 -H0x1000 -D0x20000000 -btextro -bhalt:4 -bnodelcsect -bnso -bro -bnoglink %{v} %{b*}"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{mthreads:-L/lib/thread/} \
+ %{msystem-v:-lc_v -lm.v} \
+ %{!msystem-v:%{mposix:-lc_p} -lc -lm}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{p:%{mthreads:thread/pinit.o%s}%{!mthreads:pinit.o%s}}%{!p:%{msystem-v:vinit.o%s -e_start}%{!msystem-v:%{mthreads:thread/init.o%s}%{!mthreads:init.o%s}}}"
+
+#undef ENDFILE_SPEC
+
+/* This can become more refined as we have more powerpc options. */
+#undef ASM_SPEC
+#define ASM_SPEC "-u %(asm_cpu)"
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ {"threads", MASK_THREADS}, \
+ {"posix", MASK_POSIX}, \
+ {"system-v", MASK_SYSTEM_V},
+
+#undef SUBTARGET_OVERRIDE_OPTIONS
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (TARGET_SYSTEM_V && profile_flag) \
+ warning ("-msystem-v and -p are incompatible"); \
+ if (TARGET_SYSTEM_V && TARGET_THREADS) \
+ warning ("-msystem-v and -mthreads are incompatible"); \
+} while (0)
+
+/* For collect2 */
+#define OBJECT_FORMAT_NONE
+#undef OBJECT_FORMAT_COFF
+#undef OBJECT_FORMAT_ROSE
+#undef MD_EXEC_PREFIX
+#undef REAL_LD_FILE_NAME
+#undef REAL_STRIP_FILE_NAME
+
+/* LynxOS doesn't have mcount. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(file, profile_label_no)
diff --git a/contrib/gcc/config/rs6000/mach.h b/contrib/gcc/config/rs6000/mach.h
new file mode 100644
index 0000000..d4395d5
--- /dev/null
+++ b/contrib/gcc/config/rs6000/mach.h
@@ -0,0 +1,43 @@
+/* Definitions of target machine for GNU compiler,
+ for IBM RS/6000 running MACH.
+ Copyright (C) 1992, 1999 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define TARGET_AIX 0
+
+/* Print subsidiary information on the compiler version in use. */
+#define TARGET_VERSION fprintf (stderr, " (Mach-RS/6000)");
+
+/* We don't define AIX under MACH; instead we define `unix'. */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Drios -D_IBMR2 -Dunix -Asystem=unix -Asystem=mach -Acpu=rs6000 -Amachine=rs6000"
+
+/* Define different binder options for MACH. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "-T0x10000000 -D0x20000000 -K %{!nostdlib:%{!r*:%{!e*:-e __start}}} \
+ -bnoso -berrmsg -btextro -bhalt:4 -bnodelcsect"
+
+/* MACH doesn't have atexit. */
+#define NEED_ATEXIT
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
diff --git a/contrib/gcc/config/rs6000/milli.exp b/contrib/gcc/config/rs6000/milli.exp
new file mode 100644
index 0000000..ea3a2b7
--- /dev/null
+++ b/contrib/gcc/config/rs6000/milli.exp
@@ -0,0 +1,7 @@
+#!
+__mulh 0x3100
+__mull 0x3180
+__divss 0x3200
+__divus 0x3280
+__quoss 0x3300
+__quous 0x3380
diff --git a/contrib/gcc/config/rs6000/netbsd.h b/contrib/gcc/config/rs6000/netbsd.h
new file mode 100644
index 0000000..0e58a45
--- /dev/null
+++ b/contrib/gcc/config/rs6000/netbsd.h
@@ -0,0 +1,66 @@
+/* Definitions of target machine for GNU compiler,
+ for PowerPC NetBSD systems.
+ Copyright 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Under NetBSD, the normal location of the various *crt*.o files is
+ the /usr/lib directory [from config/netbsd.h]. */
+
+#undef STANDARD_STARTFILE_PREFIX
+#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+
+/* FIXME: Should this macro be treated the same as for the other
+ spec's? */
+/* NOTE: -dc and -dp are equivalent yet NetBSD's CC passes both!
+   NetBSD's CC also passes -O1 but we can skip that. NetBSD explicitly
+ sets ``-e _start'', since LD knows this, skip it. */
+
+#undef LINK_SHLIB_SPEC
+#define LINK_SHLIB_SPEC "\
+%{shared:-shared} \
+%{!shared: %{static:-dc -dp -static}} \
+%{!shared: %{!static:-dc -dp}} \
+"
+
+/* Override the defaults. */
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_netbsd)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_netbsd)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_netbsd)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_netbsd)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_netbsd)"
+
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_netbsd)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC NetBSD/ELF)");
+
+/* For backward compatibility, we must continue to use the AIX
+ structure return convention. */
+#undef DRAFT_V4_STRUCT_RET
+#define DRAFT_V4_STRUCT_RET 1
diff --git a/contrib/gcc/config/rs6000/ppc-asm.h b/contrib/gcc/config/rs6000/ppc-asm.h
new file mode 100644
index 0000000..3a6fb2a
--- /dev/null
+++ b/contrib/gcc/config/rs6000/ppc-asm.h
@@ -0,0 +1,196 @@
+/* PowerPC asm definitions for GNU C. */
+/* Under winnt, 1) gas supports the following as names and 2) in particular
+ defining "toc" breaks the FUNC_START macro as ".toc" becomes ".2" */
+
+#if !defined(__WINNT__)
+#define r0 0
+#define sp 1
+#define toc 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+
+#define cr0 0
+#define cr1 1
+#define cr2 2
+#define cr3 3
+#define cr4 4
+#define cr5 5
+#define cr6 6
+#define cr7 7
+
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#endif
+
+/*
+ * Macros to glue together two tokens.
+ */
+
+#ifdef __STDC__
+#define XGLUE(a,b) a##b
+#else
+#define XGLUE(a,b) a/**/b
+#endif
+
+#define GLUE(a,b) XGLUE(a,b)
+
+/*
+ * Macros to begin and end a function written in assembler. If -mcall-aixdesc
+ * or -mcall-nt, create a function descriptor with the given name, and create
+ * the real function with one or two leading periods respectively.
+ */
+
+#ifdef _RELOCATABLE
+#define DESC_SECTION ".got2"
+#else
+#define DESC_SECTION ".got1"
+#endif
+
+#if defined(_CALL_AIXDESC)
+#define FUNC_NAME(name) GLUE(.,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#define FUNC_START(name) \
+ .section DESC_SECTION,"aw"; \
+name: \
+ .long GLUE(.,name); \
+ .long _GLOBAL_OFFSET_TABLE_; \
+ .long 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+ .globl name; \
+ .globl GLUE(.,name); \
+GLUE(.,name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name)
+
+#elif defined(__WINNT__)
+#define FUNC_NAME(name) GLUE(..,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#define FUNC_START(name) \
+ .pdata; \
+ .align 2; \
+ .ualong GLUE(..,name),GLUE(name,.e),0,0,GLUE(..,name); \
+ .reldata; \
+name: \
+ .ualong GLUE(..,name),.toc; \
+ .section .text; \
+ .globl name; \
+ .globl GLUE(..,name); \
+GLUE(..,name):
+
+#define FUNC_END(name) \
+GLUE(name,.e): ; \
+GLUE(FE_MOT_RESVD..,name):
+
+#elif defined(_CALL_NT)
+#define FUNC_NAME(name) GLUE(..,name)
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#define FUNC_START(name) \
+ .section DESC_SECTION,"aw"; \
+name: \
+ .long GLUE(..,name); \
+ .long _GLOBAL_OFFSET_TABLE_; \
+ .previous; \
+ .type GLUE(..,name),@function; \
+ .globl name; \
+ .globl GLUE(..,name); \
+GLUE(..,name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(..,name),GLUE(.L,name)-GLUE(..,name)
+
+#elif defined (__powerpc64__)
+#define FUNC_NAME(name) GLUE(.,name)
+#define FUNC_START(name) \
+ .section ".opd","aw"; \
+name: \
+ .quad GLUE(.,name); \
+ .quad .TOC.@tocbase; \
+ .quad 0; \
+ .previous; \
+ .type GLUE(.,name),@function; \
+ .globl name; \
+ .globl GLUE(.,name); \
+GLUE(.,name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size GLUE(.,name),GLUE(.L,name)-GLUE(.,name)
+
+#else
+#define FUNC_NAME(name) GLUE(__USER_LABEL_PREFIX__,name)
+#if defined __PIC__ || defined __pic__
+#define JUMP_TARGET(name) FUNC_NAME(name@plt)
+#else
+#define JUMP_TARGET(name) FUNC_NAME(name)
+#endif
+#define FUNC_START(name) \
+ .type FUNC_NAME(name),@function; \
+ .globl FUNC_NAME(name); \
+FUNC_NAME(name):
+
+#define FUNC_END(name) \
+GLUE(.L,name): \
+ .size FUNC_NAME(name),GLUE(.L,name)-FUNC_NAME(name)
+#endif
+
diff --git a/contrib/gcc/config/rs6000/rs6000-protos.h b/contrib/gcc/config/rs6000/rs6000-protos.h
new file mode 100644
index 0000000..c40689e
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rs6000-protos.h
@@ -0,0 +1,186 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Declare functions in rs6000.c */
+
+#ifdef RTX_CODE
+
+#ifdef TREE_CODE
+extern void init_cumulative_args PARAMS ((CUMULATIVE_ARGS *, tree, rtx, int));
+extern void rs6000_va_start PARAMS ((int, tree, rtx));
+#endif /* TREE_CODE */
+
+extern struct rtx_def *rs6000_got_register PARAMS ((rtx));
+extern struct rtx_def *find_addr_reg PARAMS ((rtx));
+extern int any_operand PARAMS ((rtx, enum machine_mode));
+extern int short_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int u_short_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int non_short_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int exact_log2_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int gpc_reg_operand PARAMS ((rtx, enum machine_mode));
+extern int cc_reg_operand PARAMS ((rtx, enum machine_mode));
+extern int cc_reg_not_cr0_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_short_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_neg_short_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_u_short_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_arith_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_add_cint64_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_sub_cint64_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_logical_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int got_operand PARAMS ((rtx, enum machine_mode));
+extern int got_no_const_operand PARAMS ((rtx, enum machine_mode));
+extern int num_insns_constant PARAMS ((rtx, enum machine_mode));
+extern int easy_fp_constant PARAMS ((rtx, enum machine_mode));
+extern int zero_fp_constant PARAMS ((rtx, enum machine_mode));
+extern int volatile_mem_operand PARAMS ((rtx, enum machine_mode));
+extern int offsettable_mem_operand PARAMS ((rtx, enum machine_mode));
+extern int mem_or_easy_const_operand PARAMS ((rtx, enum machine_mode));
+extern int add_operand PARAMS ((rtx, enum machine_mode));
+extern int non_add_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int non_logical_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int logical_operand PARAMS ((rtx, enum machine_mode));
+extern int mask_operand PARAMS ((rtx, enum machine_mode));
+extern int mask64_operand PARAMS ((rtx, enum machine_mode));
+extern int and64_operand PARAMS ((rtx, enum machine_mode));
+extern int and_operand PARAMS ((rtx, enum machine_mode));
+extern int count_register_operand PARAMS ((rtx, enum machine_mode));
+extern int xer_operand PARAMS ((rtx, enum machine_mode));
+extern int reg_or_mem_operand PARAMS ((rtx, enum machine_mode));
+extern int lwa_operand PARAMS ((rtx, enum machine_mode));
+extern int call_operand PARAMS ((rtx, enum machine_mode));
+extern int current_file_function_operand PARAMS ((rtx, enum machine_mode));
+extern int input_operand PARAMS ((rtx, enum machine_mode));
+extern int small_data_operand PARAMS ((rtx, enum machine_mode));
+extern int s8bit_cint_operand PARAMS ((rtx, enum machine_mode));
+extern int constant_pool_expr_p PARAMS ((rtx));
+extern int toc_relative_expr_p PARAMS ((rtx));
+extern int expand_block_move PARAMS ((rtx[]));
+extern int load_multiple_operation PARAMS ((rtx, enum machine_mode));
+extern int store_multiple_operation PARAMS ((rtx, enum machine_mode));
+extern int branch_comparison_operator PARAMS ((rtx, enum machine_mode));
+extern int branch_positive_comparison_operator
+ PARAMS ((rtx, enum machine_mode));
+extern int scc_comparison_operator PARAMS ((rtx, enum machine_mode));
+extern int trap_comparison_operator PARAMS ((rtx, enum machine_mode));
+extern int boolean_operator PARAMS ((rtx, enum machine_mode));
+extern int boolean_or_operator PARAMS ((rtx, enum machine_mode));
+extern int min_max_operator PARAMS ((rtx, enum machine_mode));
+extern int includes_lshift_p PARAMS ((rtx, rtx));
+extern int includes_rshift_p PARAMS ((rtx, rtx));
+extern int includes_rldic_lshift_p PARAMS ((rtx, rtx));
+extern int includes_rldicr_lshift_p PARAMS ((rtx, rtx));
+extern int registers_ok_for_quad_peep PARAMS ((rtx, rtx));
+extern int addrs_ok_for_quad_peep PARAMS ((rtx, rtx));
+extern enum reg_class secondary_reload_class PARAMS ((enum reg_class,
+ enum machine_mode, rtx));
+extern int ccr_bit PARAMS ((rtx, int));
+extern void print_operand PARAMS ((FILE *, rtx, int));
+extern void print_operand_address PARAMS ((FILE *, rtx));
+extern enum rtx_code rs6000_reverse_condition PARAMS ((enum machine_mode,
+ enum rtx_code));
+extern void rs6000_emit_sCOND PARAMS ((enum rtx_code, rtx));
+extern void rs6000_emit_cbranch PARAMS ((enum rtx_code, rtx));
+extern char * output_cbranch PARAMS ((rtx, const char *, int, rtx));
+extern rtx rs6000_emit_set_const PARAMS ((rtx, enum machine_mode, rtx, int));
+extern int rs6000_emit_cmove PARAMS ((rtx, rtx, rtx, rtx));
+extern void rs6000_emit_minmax PARAMS ((rtx, enum rtx_code, rtx, rtx));
+extern void output_toc PARAMS ((FILE *, rtx, int, enum machine_mode));
+extern void rs6000_initialize_trampoline PARAMS ((rtx, rtx, rtx));
+extern struct rtx_def *rs6000_longcall_ref PARAMS ((rtx));
+extern void rs6000_fatal_bad_address PARAMS ((rtx));
+extern int stmw_operation PARAMS ((rtx, enum machine_mode));
+extern int mtcrf_operation PARAMS ((rtx, enum machine_mode));
+extern int lmw_operation PARAMS ((rtx, enum machine_mode));
+extern struct rtx_def *create_TOC_reference PARAMS ((rtx));
+extern void rs6000_emit_eh_toc_restore PARAMS ((rtx));
+extern void rs6000_emit_move PARAMS ((rtx, rtx, enum machine_mode));
+extern rtx rs6000_legitimize_address PARAMS ((rtx, rtx, enum machine_mode));
+extern rtx rs6000_legitimize_reload_address PARAMS ((rtx, enum machine_mode,
+ int, int, int, int *));
+extern int rs6000_legitimate_address PARAMS ((enum machine_mode, rtx, int));
+extern void rs6000_select_rtx_section PARAMS ((enum machine_mode, rtx));
+
+extern rtx rs6000_return_addr PARAMS ((int, rtx));
+extern void rs6000_output_symbol_ref PARAMS ((FILE*, rtx));
+
+extern rtx rs6000_machopic_legitimize_pic_address PARAMS ((rtx orig, enum machine_mode mode, rtx reg));
+
+#endif /* RTX_CODE */
+
+#ifdef TREE_CODE
+extern void function_arg_advance PARAMS ((CUMULATIVE_ARGS *, enum machine_mode,
+ tree, int));
+extern int function_arg_boundary PARAMS ((enum machine_mode, tree));
+extern struct rtx_def *function_arg PARAMS ((CUMULATIVE_ARGS *,
+ enum machine_mode, tree, int));
+extern int function_arg_partial_nregs PARAMS ((CUMULATIVE_ARGS *,
+ enum machine_mode, tree, int));
+extern int function_arg_pass_by_reference PARAMS ((CUMULATIVE_ARGS *,
+ enum machine_mode,
+ tree, int));
+extern void setup_incoming_varargs PARAMS ((CUMULATIVE_ARGS *,
+ enum machine_mode, tree,
+ int *, int));
+extern struct rtx_def *rs6000_va_arg PARAMS ((tree, tree));
+extern void output_mi_thunk PARAMS ((FILE *, tree, int, tree));
+extern void rs6000_encode_section_info PARAMS ((tree));
+extern void rs6000_select_section PARAMS ((tree, int));
+extern void rs6000_unique_section PARAMS ((tree, int));
+#ifdef ARGS_SIZE_RTX
+/* expr.h defines ARGS_SIZE_RTX and `enum direction' */
+extern enum direction function_arg_padding PARAMS ((enum machine_mode, tree));
+#endif /* ARGS_SIZE_RTX */
+
+#endif /* TREE_CODE */
+
+extern void optimization_options PARAMS ((int, int));
+extern void rs6000_override_options PARAMS ((const char *));
+extern void rs6000_file_start PARAMS ((FILE *, const char *));
+extern struct rtx_def *rs6000_float_const PARAMS ((const char *,
+ enum machine_mode));
+extern int direct_return PARAMS ((void));
+extern union tree_node *rs6000_build_va_list PARAMS ((void));
+extern int first_reg_to_save PARAMS ((void));
+extern int first_fp_reg_to_save PARAMS ((void));
+extern rs6000_stack_t *rs6000_stack_info PARAMS ((void));
+extern void output_ascii PARAMS ((FILE *, const char *, int));
+extern void rs6000_gen_section_name PARAMS ((char **, const char *,
+ const char *));
+extern void output_function_profiler PARAMS ((FILE *, int));
+extern void output_profile_hook PARAMS ((int));
+extern int rs6000_trampoline_size PARAMS ((void));
+extern void toc_section PARAMS ((void));
+extern void sdata_section PARAMS ((void));
+extern void sdata2_section PARAMS ((void));
+extern void sbss_section PARAMS ((void));
+extern void private_data_section PARAMS ((void));
+extern void read_only_data_section PARAMS ((void));
+extern void read_only_private_data_section PARAMS ((void));
+extern int get_TOC_alias_set PARAMS ((void));
+extern int uses_TOC PARAMS ((void));
+extern void rs6000_emit_prologue PARAMS ((void));
+extern void rs6000_emit_load_toc_table PARAMS ((int));
+extern void rs6000_aix_emit_builtin_unwind_init PARAMS ((void));
+extern void rs6000_emit_epilogue PARAMS ((int));
+extern void debug_stack_info PARAMS ((rs6000_stack_t *));
+
+extern void machopic_output_stub PARAMS ((FILE *, const char *, const char *));
diff --git a/contrib/gcc/config/rs6000/rs6000.c b/contrib/gcc/config/rs6000/rs6000.c
new file mode 100644
index 0000000..f8375a4
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rs6000.c
@@ -0,0 +1,11145 @@
+/* Subroutines used for code generation on IBM RS/6000.
+ Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "obstack.h"
+#include "tree.h"
+#include "expr.h"
+#include "optabs.h"
+#include "except.h"
+#include "function.h"
+#include "output.h"
+#include "basic-block.h"
+#include "integrate.h"
+#include "toplev.h"
+#include "ggc.h"
+#include "hashtab.h"
+#include "tm_p.h"
+#include "target.h"
+#include "target-def.h"
+#include "langhooks.h"
+#include "reload.h"
+
+#ifndef TARGET_NO_PROTOTYPE
+#define TARGET_NO_PROTOTYPE 0
+#endif
+
+#define min(A,B) ((A) < (B) ? (A) : (B))
+#define max(A,B) ((A) > (B) ? (A) : (B))
+
+/* Target cpu type */
+
+enum processor_type rs6000_cpu;
+struct rs6000_cpu_select rs6000_select[3] =
+{
+ /* switch name, tune arch */
+ { (const char *)0, "--with-cpu=", 1, 1 },
+ { (const char *)0, "-mcpu=", 1, 1 },
+ { (const char *)0, "-mtune=", 1, 0 },
+};
+
+/* Size of long double */
+const char *rs6000_long_double_size_string;
+int rs6000_long_double_type_size;
+
+/* Whether -mabi=altivec has appeared */
+int rs6000_altivec_abi;
+
+/* Set to non-zero once AIX common-mode calls have been defined. */
+static int common_mode_defined;
+
+/* Save information from a "cmpxx" operation until the branch or scc is
+ emitted. */
+rtx rs6000_compare_op0, rs6000_compare_op1;
+int rs6000_compare_fp_p;
+
+/* Label number of label created for -mrelocatable, to call to so we can
+ get the address of the GOT section */
+int rs6000_pic_labelno;
+
+#ifdef USING_ELFOS_H
+/* Which abi to adhere to */
+const char *rs6000_abi_name = RS6000_ABI_NAME;
+
+/* Semantics of the small data area */
+enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;
+
+/* Which small data model to use */
+const char *rs6000_sdata_name = (char *)0;
+
+/* Counter for labels which are to be placed in .fixup. */
+int fixuplabelno = 0;
+#endif
+
+/* ABI enumeration available for subtarget to use. */
+enum rs6000_abi rs6000_current_abi;
+
+/* ABI string from -mabi= option. */
+const char *rs6000_abi_string;
+
+/* Debug flags */
+const char *rs6000_debug_name;
+int rs6000_debug_stack; /* debug stack applications */
+int rs6000_debug_arg; /* debug argument handling */
+
+/* Flag to say the TOC is initialized */
+int toc_initialized;
+char toc_label_name[10];
+
+/* Alias set for saves and restores from the rs6000 stack. */
+static int rs6000_sr_alias_set;
+
+static void rs6000_add_gc_roots PARAMS ((void));
+static int num_insns_constant_wide PARAMS ((HOST_WIDE_INT));
+static rtx expand_block_move_mem PARAMS ((enum machine_mode, rtx, rtx));
+static void validate_condition_mode
+ PARAMS ((enum rtx_code, enum machine_mode));
+static rtx rs6000_generate_compare PARAMS ((enum rtx_code));
+static void rs6000_maybe_dead PARAMS ((rtx));
+static void rs6000_emit_stack_tie PARAMS ((void));
+static void rs6000_frame_related PARAMS ((rtx, rtx, HOST_WIDE_INT, rtx, rtx));
+static void rs6000_emit_allocate_stack PARAMS ((HOST_WIDE_INT, int));
+static unsigned rs6000_hash_constant PARAMS ((rtx));
+static unsigned toc_hash_function PARAMS ((const void *));
+static int toc_hash_eq PARAMS ((const void *, const void *));
+static int toc_hash_mark_entry PARAMS ((void **, void *));
+static void toc_hash_mark_table PARAMS ((void *));
+static int constant_pool_expr_1 PARAMS ((rtx, int *, int *));
+static void rs6000_free_machine_status PARAMS ((struct function *));
+static void rs6000_init_machine_status PARAMS ((struct function *));
+static bool rs6000_assemble_integer PARAMS ((rtx, unsigned int, int));
+static int rs6000_ra_ever_killed PARAMS ((void));
+static tree rs6000_handle_longcall_attribute PARAMS ((tree *, tree, tree, int, bool *));
+const struct attribute_spec rs6000_attribute_table[];
+static void rs6000_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT));
+static void rs6000_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT));
+static rtx rs6000_emit_set_long_const PARAMS ((rtx,
+ HOST_WIDE_INT, HOST_WIDE_INT));
+#if TARGET_ELF
+static unsigned int rs6000_elf_section_type_flags PARAMS ((tree, const char *,
+ int));
+static void rs6000_elf_asm_out_constructor PARAMS ((rtx, int));
+static void rs6000_elf_asm_out_destructor PARAMS ((rtx, int));
+#endif
+#ifdef OBJECT_FORMAT_COFF
+static void xcoff_asm_named_section PARAMS ((const char *, unsigned int));
+#endif
+static int rs6000_adjust_cost PARAMS ((rtx, rtx, rtx, int));
+static int rs6000_adjust_priority PARAMS ((rtx, int));
+static int rs6000_issue_rate PARAMS ((void));
+
+static void rs6000_init_builtins PARAMS ((void));
+static void altivec_init_builtins PARAMS ((void));
+static rtx rs6000_expand_builtin PARAMS ((tree, rtx, rtx, enum machine_mode, int));
+static rtx altivec_expand_builtin PARAMS ((tree, rtx));
+static rtx altivec_expand_unop_builtin PARAMS ((enum insn_code, tree, rtx));
+static rtx altivec_expand_binop_builtin PARAMS ((enum insn_code, tree, rtx));
+static rtx altivec_expand_ternop_builtin PARAMS ((enum insn_code, tree, rtx));
+static rtx altivec_expand_stv_builtin PARAMS ((enum insn_code, tree));
+static void rs6000_parse_abi_options PARAMS ((void));
+static int first_altivec_reg_to_save PARAMS ((void));
+static unsigned int compute_vrsave_mask PARAMS ((void));
+static void is_altivec_return_reg PARAMS ((rtx, void *));
+int vrsave_operation PARAMS ((rtx, enum machine_mode));
+static rtx generate_set_vrsave PARAMS ((rtx, rs6000_stack_t *, int));
+static void altivec_frame_fixup PARAMS ((rtx, rtx, HOST_WIDE_INT));
+
+/* Default register names. */
+char rs6000_reg_names[][8] =
+{
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "mq", "lr", "ctr","ap",
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "xer",
+ /* AltiVec registers. */
+ "0", "1", "2", "3", "4", "5", "6", "7",
+ "8", "9", "10", "11", "12", "13", "14", "15",
+ "16", "17", "18", "19", "20", "21", "22", "23",
+ "24", "25", "26", "27", "28", "29", "30", "31",
+ "vrsave"
+};
+
+#ifdef TARGET_REGNAMES
+static const char alt_reg_names[][8] =
+{
+ "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
+ "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
+ "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
+ "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
+ "mq", "lr", "ctr", "ap",
+ "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
+ "xer",
+ /* AltiVec registers. */
+ "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
+ "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
+ "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
+ "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
+ "%vrsave"
+};
+#endif
+
+#ifndef MASK_STRICT_ALIGN
+#define MASK_STRICT_ALIGN 0
+#endif
+
+/* Initialize the GCC target structure. */
+#undef TARGET_ATTRIBUTE_TABLE
+#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
+
+#undef TARGET_ASM_ALIGNED_DI_OP
+#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
+
+/* Default unaligned ops are only provided for ELF. Find the ops needed
+ for non-ELF systems. */
+#ifndef OBJECT_FORMAT_ELF
+#ifdef OBJECT_FORMAT_COFF
+/* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
+ 64-bit targets. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
+#undef TARGET_ASM_UNALIGNED_DI_OP
+#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
+#else
+/* For Darwin. */
+#undef TARGET_ASM_UNALIGNED_HI_OP
+#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
+#undef TARGET_ASM_UNALIGNED_SI_OP
+#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
+#endif
+#endif
+
+/* This hook deals with fixups for relocatable code and DI-mode objects
+ in 64-bit code. */
+#undef TARGET_ASM_INTEGER
+#define TARGET_ASM_INTEGER rs6000_assemble_integer
+
+#undef TARGET_ASM_FUNCTION_PROLOGUE
+#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
+
+#if TARGET_ELF
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS rs6000_elf_section_type_flags
+#endif
+
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
+#undef TARGET_SCHED_ADJUST_PRIORITY
+#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS rs6000_init_builtins
+
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
+
+/* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
+#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+/* Override command line options. Mostly we process the processor
+ type and sometimes adjust other TARGET_ options. */
+
+void
+rs6000_override_options (default_cpu)
+ const char *default_cpu;
+{
+ size_t i, j;
+ struct rs6000_cpu_select *ptr;
+
+ /* Simplify the entries below by making a mask for any POWER
+ variant and any PowerPC variant. */
+
+#define POWER_MASKS (MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING)
+#define POWERPC_MASKS (MASK_POWERPC | MASK_PPC_GPOPT \
+ | MASK_PPC_GFXOPT | MASK_POWERPC64)
+#define POWERPC_OPT_MASKS (MASK_PPC_GPOPT | MASK_PPC_GFXOPT)
+
+ static struct ptt
+ {
+ const char *const name; /* Canonical processor name. */
+ const enum processor_type processor; /* Processor type enum value. */
+ const int target_enable; /* Target flags to enable. */
+ const int target_disable; /* Target flags to disable. */
+ } const processor_target_table[]
+ = {{"common", PROCESSOR_COMMON, MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_MASKS},
+ {"power", PROCESSOR_POWER,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"power2", PROCESSOR_POWER,
+ MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
+ POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"power3", PROCESSOR_PPC630,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT},
+ {"powerpc", PROCESSOR_POWERPC,
+ MASK_POWERPC | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"powerpc64", PROCESSOR_POWERPC64,
+ MASK_POWERPC | MASK_POWERPC64 | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS},
+ {"rios", PROCESSOR_RIOS1,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"rios1", PROCESSOR_RIOS1,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"rsc", PROCESSOR_PPC601,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"rsc1", PROCESSOR_PPC601,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"rios2", PROCESSOR_RIOS2,
+ MASK_POWER | MASK_MULTIPLE | MASK_STRING | MASK_POWER2,
+ POWERPC_MASKS | MASK_NEW_MNEMONICS},
+ {"rs64a", PROCESSOR_RS64A,
+ MASK_POWERPC | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS},
+ {"401", PROCESSOR_PPC403,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"403", PROCESSOR_PPC403,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS | MASK_STRICT_ALIGN,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"405", PROCESSOR_PPC405,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"505", PROCESSOR_MPCCORE,
+ MASK_POWERPC | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"601", PROCESSOR_PPC601,
+ MASK_POWER | MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_MULTIPLE | MASK_STRING,
+ MASK_POWER2 | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"602", PROCESSOR_PPC603,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"603", PROCESSOR_PPC603,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"603e", PROCESSOR_PPC603,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"ec603e", PROCESSOR_PPC603,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"604", PROCESSOR_PPC604,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"604e", PROCESSOR_PPC604e,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"620", PROCESSOR_PPC620,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT},
+ {"630", PROCESSOR_PPC630,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT},
+ {"740", PROCESSOR_PPC750,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"750", PROCESSOR_PPC750,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"7400", PROCESSOR_PPC7400,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"7450", PROCESSOR_PPC7450,
+ MASK_POWERPC | MASK_PPC_GFXOPT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | MASK_PPC_GPOPT | MASK_POWERPC64},
+ {"801", PROCESSOR_MPCCORE,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"821", PROCESSOR_MPCCORE,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"823", PROCESSOR_MPCCORE,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64},
+ {"860", PROCESSOR_MPCCORE,
+ MASK_POWERPC | MASK_SOFT_FLOAT | MASK_NEW_MNEMONICS,
+ POWER_MASKS | POWERPC_OPT_MASKS | MASK_POWERPC64}};
+
+ size_t ptt_size = sizeof (processor_target_table) / sizeof (struct ptt);
+
+ /* Save current -mmultiple/-mno-multiple status. */
+ int multiple = TARGET_MULTIPLE;
+ /* Save current -mstring/-mno-string status. */
+ int string = TARGET_STRING;
+
+ /* Identify the processor type. */
+ rs6000_select[0].string = default_cpu;
+ rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
+
+ for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+ {
+ ptr = &rs6000_select[i];
+ if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+ {
+ for (j = 0; j < ptt_size; j++)
+ if (! strcmp (ptr->string, processor_target_table[j].name))
+ {
+ if (ptr->set_tune_p)
+ rs6000_cpu = processor_target_table[j].processor;
+
+ if (ptr->set_arch_p)
+ {
+ target_flags |= processor_target_table[j].target_enable;
+ target_flags &= ~processor_target_table[j].target_disable;
+ }
+ break;
+ }
+
+ if (j == ptt_size)
+ error ("bad value (%s) for %s switch", ptr->string, ptr->name);
+ }
+ }
+
+ /* If we are optimizing big endian systems for space, use the store
+ multiple instructions. */
+ if (BYTES_BIG_ENDIAN && optimize_size)
+ target_flags |= MASK_MULTIPLE;
+
+ /* If -mmultiple or -mno-multiple was explicitly used, don't
+ override with the processor default */
+ if (TARGET_MULTIPLE_SET)
+ target_flags = (target_flags & ~MASK_MULTIPLE) | multiple;
+
+ /* If -mstring or -mno-string was explicitly used, don't override
+ with the processor default. */
+ if (TARGET_STRING_SET)
+ target_flags = (target_flags & ~MASK_STRING) | string;
+
+ /* Don't allow -mmultiple or -mstring on little endian systems
+ unless the cpu is a 750, because the hardware doesn't support the
+ instructions used in little endian mode, and causes an alignment
+ trap. The 750 does not cause an alignment trap (except when the
+ target is unaligned). */
+
+ if (! BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
+ {
+ if (TARGET_MULTIPLE)
+ {
+ target_flags &= ~MASK_MULTIPLE;
+ if (TARGET_MULTIPLE_SET)
+ warning ("-mmultiple is not supported on little endian systems");
+ }
+
+ if (TARGET_STRING)
+ {
+ target_flags &= ~MASK_STRING;
+ if (TARGET_STRING_SET)
+ warning ("-mstring is not supported on little endian systems");
+ }
+ }
+
+ if (flag_pic && DEFAULT_ABI == ABI_AIX)
+ {
+ warning ("-f%s ignored (all code is position independent)",
+ (flag_pic > 1) ? "PIC" : "pic");
+ flag_pic = 0;
+ }
+
+#ifdef XCOFF_DEBUGGING_INFO
+ if (flag_function_sections && (write_symbols != NO_DEBUG)
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ warning ("-ffunction-sections disabled on AIX when debugging");
+ flag_function_sections = 0;
+ }
+
+ if (flag_data_sections && (DEFAULT_ABI == ABI_AIX))
+ {
+ warning ("-fdata-sections not supported on AIX");
+ flag_data_sections = 0;
+ }
+#endif
+
+ /* Set debug flags */
+ if (rs6000_debug_name)
+ {
+ if (! strcmp (rs6000_debug_name, "all"))
+ rs6000_debug_stack = rs6000_debug_arg = 1;
+ else if (! strcmp (rs6000_debug_name, "stack"))
+ rs6000_debug_stack = 1;
+ else if (! strcmp (rs6000_debug_name, "arg"))
+ rs6000_debug_arg = 1;
+ else
+ error ("unknown -mdebug-%s switch", rs6000_debug_name);
+ }
+
+ /* Set size of long double */
+ rs6000_long_double_type_size = 64;
+ if (rs6000_long_double_size_string)
+ {
+ char *tail;
+ int size = strtol (rs6000_long_double_size_string, &tail, 10);
+ if (*tail != '\0' || (size != 64 && size != 128))
+ error ("Unknown switch -mlong-double-%s",
+ rs6000_long_double_size_string);
+ else
+ rs6000_long_double_type_size = size;
+ }
+
+ /* Handle -mabi= options. */
+ rs6000_parse_abi_options ();
+
+#ifdef TARGET_REGNAMES
+ /* If the user desires alternate register names, copy in the
+ alternate names now. */
+ if (TARGET_REGNAMES)
+ memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
+#endif
+
+#ifdef SUBTARGET_OVERRIDE_OPTIONS
+ SUBTARGET_OVERRIDE_OPTIONS;
+#endif
+#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
+ SUBSUBTARGET_OVERRIDE_OPTIONS;
+#endif
+
+ /* Set TARGET_AIX_STRUCT_RET last, after the ABI is determined.
+ If -maix-struct-return or -msvr4-struct-return was explicitly
+ used, don't override with the ABI default. */
+ if (!(target_flags & MASK_AIX_STRUCT_RET_SET))
+ {
+ if (DEFAULT_ABI == ABI_V4 && !DRAFT_V4_STRUCT_RET)
+ target_flags = (target_flags & ~MASK_AIX_STRUCT_RET);
+ else
+ target_flags |= MASK_AIX_STRUCT_RET;
+ }
+
+ /* Register global variables with the garbage collector. */
+ rs6000_add_gc_roots ();
+
+ /* Allocate an alias set for register saves & restores from stack. */
+ rs6000_sr_alias_set = new_alias_set ();
+
+ if (TARGET_TOC)
+ ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
+
+ /* We can only guarantee the availability of DI pseudo-ops when
+ assembling for 64-bit targets. */
+ if (!TARGET_64BIT)
+ {
+ targetm.asm_out.aligned_op.di = NULL;
+ targetm.asm_out.unaligned_op.di = NULL;
+ }
+
+ /* Arrange to save and restore machine status around nested functions. */
+ init_machine_status = rs6000_init_machine_status;
+ free_machine_status = rs6000_free_machine_status;
+}
+
+/* Handle -mabi= options. */
+static void
+rs6000_parse_abi_options ()
+{
+ if (rs6000_abi_string == 0)
+ return;
+ else if (! strcmp (rs6000_abi_string, "altivec"))
+ rs6000_altivec_abi = 1;
+ else
+ error ("unknown ABI specified: '%s'", rs6000_abi_string);
+}
+
+void
+optimization_options (level, size)
+ int level ATTRIBUTE_UNUSED;
+ int size ATTRIBUTE_UNUSED;
+{
+}
+
+/* Do anything needed at the start of the asm file.  Under
+   -fverbose-asm this emits an assembler comment recording the
+   cpu/tune selections and (on ELF targets) the -msdata and -G
+   settings.  DEFAULT_CPU is the configured default processor name.  */
+
+void
+rs6000_file_start (file, default_cpu)
+     FILE *file;
+     const char *default_cpu;
+{
+  size_t i;
+  char buffer[80];
+  /* START initially points at the comment leader in BUFFER; after the
+     first item is printed it is reset to "" so the leader appears only
+     once.  */
+  const char *start = buffer;
+  struct rs6000_cpu_select *ptr;
+
+  if (flag_verbose_asm)
+    {
+      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
+      rs6000_select[0].string = default_cpu;
+
+      for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
+	{
+	  ptr = &rs6000_select[i];
+	  if (ptr->string != (char *)0 && ptr->string[0] != '\0')
+	    {
+	      fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
+	      start = "";
+	    }
+	}
+
+#ifdef USING_ELFOS_H
+      switch (rs6000_sdata)
+	{
+	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
+	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
+	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
+	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
+	}
+
+      if (rs6000_sdata && g_switch_value)
+	{
+	  fprintf (file, "%s -G %d", start, g_switch_value);
+	  start = "";
+	}
+#endif
+
+      /* START is "" exactly when at least one item was printed;
+	 terminate the comment line in that case.  */
+      if (*start == '\0')
+	putc ('\n', file);
+    }
+}
+
+
+/* Create a CONST_DOUBLE of mode MODE from the decimal string STRING.  */
+
+struct rtx_def *
+rs6000_float_const (string, mode)
+     const char *string;
+     enum machine_mode mode;
+{
+  REAL_VALUE_TYPE parsed = REAL_VALUE_ATOF (string, mode);
+  return immed_real_const_1 (parsed, mode);
+}
+
+/* Return non-zero if this function is known to have a null epilogue:
+   after reload, no registers need restoring and no stack need be
+   popped.  */
+
+int
+direct_return ()
+{
+  rs6000_stack_t *frame;
+
+  /* Before reload the frame layout is not yet known.  */
+  if (! reload_completed)
+    return 0;
+
+  frame = rs6000_stack_info ();
+  return (frame->first_gp_reg_save == 32
+          && frame->first_fp_reg_save == 64
+          && frame->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
+          && ! frame->lr_save_p
+          && ! frame->cr_save_p
+          && frame->vrsave_mask == 0
+          && ! frame->push_p);
+}
+
+/* Predicate that accepts anything: always returns 1, ignoring both OP
+   and MODE.  */
+
+int
+any_operand (op, mode)
+     rtx op ATTRIBUTE_UNUSED;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  return 1;
+}
+
+/* Returns 1 if OP is the count register, or a register that has not
+   been allocated to a hard register yet (and so might still end up as
+   the count register).  */
+int
+count_register_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != REG)
+    return 0;
+
+  if (REGNO (op) == COUNT_REGISTER_REGNUM)
+    return 1;
+
+  /* Accept any non-hard register.  Use >= FIRST_PSEUDO_REGISTER, as
+     cc_reg_operand does below; the previous `>' comparison wrongly
+     excluded the register numbered exactly FIRST_PSEUDO_REGISTER.  */
+  if (REGNO (op) >= FIRST_PSEUDO_REGISTER)
+    return 1;
+
+  return 0;
+}
+
+/* Returns 1 if OP is a register denoting the XER.  */
+int
+xer_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  return GET_CODE (op) == REG && XER_REGNO_P (REGNO (op));
+}
+
+/* Return 1 if OP is a signed 8-bit constant.  Int multiplication
+   by such constants completes more quickly.  */
+
+int
+s8bit_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  return INTVAL (op) >= -128 && INTVAL (op) <= 127;
+}
+
+/* Return 1 if OP is a constant that can fit in a signed 16-bit
+   D field.  */
+
+int
+short_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  return CONST_OK_FOR_LETTER_P (INTVAL (op), 'I');
+}
+
+/* Similar to short_cint_operand, but for an unsigned 16-bit D field.  */
+
+int
+u_short_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  return CONST_OK_FOR_LETTER_P (INTVAL (op), 'K');
+}
+
+/* Return 1 if OP is a CONST_INT that cannot fit in a signed D field,
+   i.e. lies outside [-0x8000, 0x7fff].  */
+
+int
+non_short_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  /* Bias by 0x8000: in-range values map to [0, 0x10000).  */
+  return (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x8000) >= 0x10000;
+}
+
+/* Returns 1 if OP is a CONST_INT that is a positive value
+   and an exact power of 2.  */
+
+int
+exact_log2_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT || INTVAL (op) <= 0)
+    return 0;
+
+  return exact_log2 (INTVAL (op)) >= 0;
+}
+
+/* Returns 1 if OP is a register that is not special (i.e., not MQ,
+   ctr, lr, or the XER).  Pseudo registers are accepted.  */
+
+int
+gpc_reg_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (! register_operand (op, mode))
+    return 0;
+
+  /* SUBREGs and the like pass through register_operand unchecked.  */
+  if (GET_CODE (op) != REG)
+    return 1;
+
+  /* Hard registers below MQ are the general/float/vector files.  */
+  if (REGNO (op) < MQ_REGNO)
+    return 1;
+
+  /* Above the hard special registers, accept anything except XER.  */
+  return (REGNO (op) >= ARG_POINTER_REGNUM
+          && ! XER_REGNO_P (REGNO (op)));
+}
+
+/* Returns 1 if OP is either a pseudo-register or a register denoting a
+   CR field.  */
+
+int
+cc_reg_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (! register_operand (op, mode))
+    return 0;
+
+  if (GET_CODE (op) != REG)
+    return 1;
+
+  return (REGNO (op) >= FIRST_PSEUDO_REGISTER
+          || CR_REGNO_P (REGNO (op)));
+}
+
+/* Returns 1 if OP is either a pseudo-register or a register denoting a
+   CR field other than CR0.  */
+
+int
+cc_reg_not_cr0_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (! register_operand (op, mode))
+    return 0;
+
+  if (GET_CODE (op) != REG)
+    return 1;
+
+  return (REGNO (op) >= FIRST_PSEUDO_REGISTER
+          || CR_REGNO_NOT_CR0_P (REGNO (op)));
+}
+
+/* Returns 1 if OP is either a constant integer valid for a D-field or
+   a non-special register.  If a register, it must be in the proper
+   mode unless MODE is VOIDmode.  */
+
+int
+reg_or_short_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (gpc_reg_operand (op, mode))
+    return 1;
+
+  return short_cint_operand (op, mode);
+}
+
+/* Similar to reg_or_short_operand, except check whether the negation
+   of the constant would be valid for a D-field.  */
+
+int
+reg_or_neg_short_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return gpc_reg_operand (op, mode);
+
+  return CONST_OK_FOR_LETTER_P (INTVAL (op), 'P');
+}
+
+/* Return 1 if the operand is either a register or an integer whose
+   high-order 16 bits are zero.  */
+
+int
+reg_or_u_short_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (gpc_reg_operand (op, mode))
+    return 1;
+
+  return u_short_cint_operand (op, mode);
+}
+
+/* Return 1 if the operand is either a non-special register or ANY
+   constant integer.  */
+
+int
+reg_or_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) == CONST_INT)
+    return 1;
+
+  return gpc_reg_operand (op, mode);
+}
+
+/* Return 1 if the operand is either a non-special register or ANY
+   32-bit signed constant integer.  */
+
+int
+reg_or_arith_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  return (gpc_reg_operand (op, mode)
+	  || (GET_CODE (op) == CONST_INT
+#if HOST_BITS_PER_WIDE_INT != 32
+	      /* On hosts with a wider HOST_WIDE_INT, require the value
+		 to lie in [-2^31, 2^31); on 32-bit hosts every
+		 CONST_INT trivially fits, so no check is needed.  */
+	      && ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80000000)
+		  < (unsigned HOST_WIDE_INT) 0x100000000ll)
+#endif
+	      ));
+}
+
+/* Return 1 if the operand is either a non-special register or a 32-bit
+   signed constant integer valid for 64-bit addition.  */
+
+int
+reg_or_add_cint64_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  return (gpc_reg_operand (op, mode)
+	  || (GET_CODE (op) == CONST_INT
+	      /* Exclude 0x7fff8000..0x7fffffff: there the rounded-up
+		 high part would not fit in the signed 32-bit range
+		 (the low 16 bits are sign-extended by addi).  */
+	      && INTVAL (op) < 0x7fff8000
+#if HOST_BITS_PER_WIDE_INT != 32
+	      /* On wider hosts, also require the biased value to fit
+		 in 32 bits at all.  */
+	      && ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x80008000)
+		  < 0x100000000ll)
+#endif
+	      ));
+}
+
+/* Return 1 if the operand is either a non-special register or a 32-bit
+   signed constant integer valid for 64-bit subtraction.  Same test as
+   reg_or_add_cint64_operand, applied to the negated value (since
+   subtraction is implemented by adding the negation).  */
+
+int
+reg_or_sub_cint64_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  return (gpc_reg_operand (op, mode)
+	  || (GET_CODE (op) == CONST_INT
+	      && (- INTVAL (op)) < 0x7fff8000
+#if HOST_BITS_PER_WIDE_INT != 32
+	      && ((unsigned HOST_WIDE_INT) ((- INTVAL (op)) + 0x80008000)
+		  < 0x100000000ll)
+#endif
+	      ));
+}
+
+/* Return 1 if the operand is either a non-special register or ANY
+   32-bit unsigned constant integer.  */
+
+int
+reg_or_logical_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) == CONST_INT)
+    {
+      if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT)
+	{
+	  /* A CONST_INT narrower than the mode only makes sense for a
+	     64-bit mode on a 32-bit host; anything else is a bug.  */
+	  if (GET_MODE_BITSIZE (mode) <= 32)
+	    abort ();
+
+	  /* A negative CONST_INT sign-extends into the upper half of
+	     the mode and so cannot be a 32-bit unsigned value.  */
+	  if (INTVAL (op) < 0)
+	    return 0;
+	}
+
+      /* Within the mode, all bits above the low 32 must be zero.  */
+      return ((INTVAL (op) & GET_MODE_MASK (mode)
+	       & (~ (unsigned HOST_WIDE_INT) 0xffffffff)) == 0);
+    }
+  else if (GET_CODE (op) == CONST_DOUBLE)
+    {
+      /* Integer CONST_DOUBLEs should only arise for DImode values that
+	 need two host words (i.e. on a 32-bit host).  */
+      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
+	  || mode != DImode)
+	abort ();
+
+      /* 32-bit unsigned means the whole high word is zero.  */
+      return CONST_DOUBLE_HIGH (op) == 0;
+    }
+  else
+    return gpc_reg_operand (op, mode);
+}
+
+/* Return 1 if the operand is something that can be loaded via the GOT:
+   a symbol, a label, or a CONST expression built from them.  */
+
+int
+got_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  switch (GET_CODE (op))
+    {
+    case SYMBOL_REF:
+    case CONST:
+    case LABEL_REF:
+      return 1;
+    default:
+      return 0;
+    }
+}
+
+/* Return 1 if the operand is a simple reference that can be loaded via
+   the GOT (labels involving addition aren't allowed).  */
+
+int
+got_no_const_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  switch (GET_CODE (op))
+    {
+    case SYMBOL_REF:
+    case LABEL_REF:
+      return 1;
+    default:
+      return 0;
+    }
+}
+
+/* Return the number of instructions it takes to form the constant
+   VALUE in an integer register.  */
+
+static int
+num_insns_constant_wide (value)
+     HOST_WIDE_INT value;
+{
+  /* signed constant loadable with {cal|addi} */
+  if (CONST_OK_FOR_LETTER_P (value, 'I'))
+    return 1;
+
+  /* constant loadable with {cau|addis} */
+  else if (CONST_OK_FOR_LETTER_P (value, 'L'))
+    return 1;
+
+#if HOST_BITS_PER_WIDE_INT == 64
+  else if (TARGET_POWERPC64)
+    {
+      /* Split into 32-bit halves; LOW is sign-extended below so that
+	 the halves recombine as high * 2^32 + low.  */
+      HOST_WIDE_INT low = value & 0xffffffff;
+      HOST_WIDE_INT high = value >> 32;
+
+      low = (low ^ 0x80000000) - 0x80000000;	/* sign extend */
+
+      /* A value that is really just a sign-extended 32-bit quantity
+	 costs two insns.  */
+      if (high == 0 && (low & 0x80000000) == 0)
+	return 2;
+
+      else if (high == -1 && (low & 0x80000000) != 0)
+	return 2;
+
+      /* Otherwise: build the high half, shift it into place (the +1),
+	 and, if the low half is non-zero, build and merge it too.  */
+      else if (! low)
+	return num_insns_constant_wide (high) + 1;
+
+      else
+	return (num_insns_constant_wide (high)
+		+ num_insns_constant_wide (low) + 1);
+    }
+#endif
+
+  else
+    return 2;
+}
+
+/* Return the number of instructions needed to load constant OP (of
+   mode MODE) into an integer register.  OP must be a CONST_INT or a
+   CONST_DOUBLE; anything else aborts.  */
+int
+num_insns_constant (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) == CONST_INT)
+    {
+#if HOST_BITS_PER_WIDE_INT == 64
+      /* A value that is not sign-extended 32-bit but matches the
+	 64-bit rotate-mask pattern can be synthesized in 2 insns.  */
+      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
+	  && mask64_operand (op, mode))
+	return 2;
+      else
+#endif
+	return num_insns_constant_wide (INTVAL (op));
+    }
+
+  else if (GET_CODE (op) == CONST_DOUBLE && mode == SFmode)
+    {
+      /* A single-precision constant is loaded via its 32-bit
+	 target image.  */
+      long l;
+      REAL_VALUE_TYPE rv;
+
+      REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+      REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+      return num_insns_constant_wide ((HOST_WIDE_INT)l);
+    }
+
+  else if (GET_CODE (op) == CONST_DOUBLE)
+    {
+      HOST_WIDE_INT low;
+      HOST_WIDE_INT high;
+      long l[2];
+      REAL_VALUE_TYPE rv;
+      int endian = (WORDS_BIG_ENDIAN == 0);
+
+      /* DImode/VOIDmode CONST_DOUBLEs hold the integer value directly;
+	 otherwise convert the FP value to its two-word target image.  */
+      if (mode == VOIDmode || mode == DImode)
+	{
+	  high = CONST_DOUBLE_HIGH (op);
+	  low = CONST_DOUBLE_LOW (op);
+	}
+      else
+	{
+	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+	  REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+	  high = l[endian];
+	  low = l[1 - endian];
+	}
+
+      if (TARGET_32BIT)
+	/* Two 32-bit registers: the cost is the sum for each half.  */
+	return (num_insns_constant_wide (low)
+		+ num_insns_constant_wide (high));
+
+      else
+	{
+	  /* One 64-bit register: same halves analysis as in
+	     num_insns_constant_wide.  */
+	  if (high == 0 && (low & 0x80000000) == 0)
+	    return num_insns_constant_wide (low);
+
+	  else if (high == -1 && (low & 0x80000000) != 0)
+	    return num_insns_constant_wide (low);
+
+	  else if (mask64_operand (op, mode))
+	    return 2;
+
+	  else if (low == 0)
+	    return num_insns_constant_wide (high) + 1;
+
+	  else
+	    return (num_insns_constant_wide (high)
+		    + num_insns_constant_wide (low) + 1);
+	}
+    }
+
+  else
+    abort ();
+}
+
+/* Return 1 if the operand is a CONST_DOUBLE and it can be put into a
+   register with one instruction per word.  We only do this if we can
+   safely read CONST_DOUBLE_{LOW,HIGH}.  */
+
+int
+easy_fp_constant (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != CONST_DOUBLE
+      || GET_MODE (op) != mode
+      || (GET_MODE_CLASS (mode) != MODE_FLOAT && mode != DImode))
+    return 0;
+
+  /* Consider all constants with -msoft-float to be easy.  */
+  if (TARGET_SOFT_FLOAT && mode != DImode)
+    return 1;
+
+  /* If we are using V.4 style PIC, consider all constants to be hard.  */
+  if (flag_pic && DEFAULT_ABI == ABI_V4)
+    return 0;
+
+#ifdef TARGET_RELOCATABLE
+  /* Similarly if we are using -mrelocatable, consider all constants
+     to be hard.  */
+  if (TARGET_RELOCATABLE)
+    return 0;
+#endif
+
+  if (mode == DFmode)
+    {
+      /* Easy iff each 32-bit word of the target image is itself a
+	 one-insn constant.  */
+      long k[2];
+      REAL_VALUE_TYPE rv;
+
+      REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+      REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
+
+      return (num_insns_constant_wide ((HOST_WIDE_INT)k[0]) == 1
+	      && num_insns_constant_wide ((HOST_WIDE_INT)k[1]) == 1);
+    }
+
+  else if (mode == SFmode)
+    {
+      /* Easy iff the single 32-bit image is a one-insn constant.  */
+      long l;
+      REAL_VALUE_TYPE rv;
+
+      REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
+      REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+      return num_insns_constant_wide (l) == 1;
+    }
+
+  else if (mode == DImode)
+    /* Easy if the low word is zero on a 64-bit target, or if the whole
+       value can be built in at most two insns.  */
+    return ((TARGET_POWERPC64
+	     && GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_LOW (op) == 0)
+	    || (num_insns_constant (op, DImode) <= 2));
+
+  else if (mode == SImode)
+    return 1;
+  else
+    abort ();
+}
+
+/* Return 1 if the operand is the floating-point constant 0.0 in
+   mode MODE.  */
+int
+zero_fp_constant (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_MODE_CLASS (mode) != MODE_FLOAT)
+    return 0;
+
+  return op == CONST0_RTX (mode);
+}
+
+/* Return 1 if the operand is in volatile memory.  Note that during
+   the RTL generation phase, memory_operand does not return TRUE for
+   volatile memory references.  So this function allows us to
+   recognize volatile references where it's safe.  */
+
+int
+volatile_mem_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != MEM
+      || ! MEM_VOLATILE_P (op)
+      || mode != GET_MODE (op))
+    return 0;
+
+  /* Pick the address check appropriate to the compilation phase.  */
+  if (reload_completed)
+    return memory_operand (op, mode);
+
+  if (reload_in_progress)
+    return strict_memory_address_p (mode, XEXP (op, 0));
+
+  return memory_address_p (mode, XEXP (op, 0));
+}
+
+/* Return 1 if the operand is a memory operand whose address is
+   offsettable (strictly so during/after reload).  */
+
+int
+offsettable_mem_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != MEM)
+    return 0;
+
+  return offsettable_address_p (reload_completed || reload_in_progress,
+                                mode, XEXP (op, 0));
+}
+
+/* Return 1 if the operand is either an easy FP constant (see
+   easy_fp_constant above) or memory.  */
+
+int
+mem_or_easy_const_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (memory_operand (op, mode))
+    return 1;
+
+  return easy_fp_constant (op, mode);
+}
+
+/* Return 1 if the operand is either a non-special register or an item
+   that can be used as the operand of a `mode' add insn: a constant
+   fitting the signed 16-bit 'I' range or the shifted 'L' range.  */
+
+int
+add_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return gpc_reg_operand (op, mode);
+
+  return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'I')
+          || CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
+}
+
+/* Return 1 if OP is a constant but not a valid add_operand.  */
+
+int
+non_add_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  /* Outside the signed 16-bit range, and not a shifted 'L' value.  */
+  return ((unsigned HOST_WIDE_INT) (INTVAL (op) + 0x8000) >= 0x10000
+          && ! CONST_OK_FOR_LETTER_P (INTVAL (op), 'L'));
+}
+
+/* Return 1 if the operand is a non-special register or a constant that
+   can be used as the operand of an OR or XOR insn on the RS/6000: the
+   constant must fit entirely within either the low or the high 16 bits
+   of a 32-bit word.  */
+
+int
+logical_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  HOST_WIDE_INT opl, oph;
+
+  if (gpc_reg_operand (op, mode))
+    return 1;
+
+  if (GET_CODE (op) == CONST_INT)
+    {
+      opl = INTVAL (op) & GET_MODE_MASK (mode);
+
+#if HOST_BITS_PER_WIDE_INT <= 32
+      /* On a 32-bit host, a negative CONST_INT used in a wider mode
+	 implies set bits in the (implicit) high word -- reject.  */
+      if (GET_MODE_BITSIZE (mode) > HOST_BITS_PER_WIDE_INT && opl < 0)
+	return 0;
+#endif
+    }
+  else if (GET_CODE (op) == CONST_DOUBLE)
+    {
+      /* Integer CONST_DOUBLEs only occur when the mode needs two host
+	 words.  */
+      if (GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+	abort ();
+
+      opl = CONST_DOUBLE_LOW (op);
+      oph = CONST_DOUBLE_HIGH (op);
+      if (oph != 0)
+	return 0;
+    }
+  else
+    return 0;
+
+  /* All set bits within the low 16 bits, or all within bits 16-31.  */
+  return ((opl & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0
+	  || (opl & ~ (unsigned HOST_WIDE_INT) 0xffff0000) == 0);
+}
+
+/* Return 1 if OP is a constant that is not a logical operand (as
+   above), but could be split into one.  */
+
+int
+non_logical_cint_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) != CONST_INT && GET_CODE (op) != CONST_DOUBLE)
+    return 0;
+
+  return (! logical_operand (op, mode)
+          && reg_or_logical_cint_operand (op, mode));
+}
+
+/* Return 1 if C is a constant that can be encoded in a 32-bit mask on the
+   RS/6000.  It is if there are no more than two 1->0 or 0->1 transitions
+   in the bit pattern (e.g. 0x0ff0 qualifies, 0x0f0f does not).
+   Reject all ones and all zeros, since these should have been optimized
+   away and confuse the making of MB and ME.  */
+
+int
+mask_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  HOST_WIDE_INT c, lsb;
+
+  if (GET_CODE (op) != CONST_INT)
+    return 0;
+
+  c = INTVAL (op);
+
+  /* We don't change the number of transitions by inverting,
+     so make sure we start with the LS bit zero.  */
+  if (c & 1)
+    c = ~c;
+
+  /* Reject all zeros or all ones.  */
+  if (c == 0)
+    return 0;
+
+  /* Find the first transition.  (c & -c isolates the lowest set bit.)  */
+  lsb = c & -c;
+
+  /* Invert to look for a second transition.  */
+  c = ~c;
+
+  /* Erase first transition.  */
+  c &= -lsb;
+
+  /* Find the second transition (if any).  */
+  lsb = c & -c;
+
+  /* Match if all the bits above are 1's (or c is zero).  */
+  return c == -lsb;
+}
+
+/* Return 1 if the operand is a constant that is a PowerPC64 mask.
+   It is if there are no more than one 1->0 or 0->1 transitions.
+   Reject all ones and all zeros, since these should have been optimized
+   away and confuse the making of MB and ME.  */
+
+int
+mask64_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (GET_CODE (op) == CONST_INT)
+    {
+      HOST_WIDE_INT c, lsb;
+
+      /* We don't change the number of transitions by inverting,
+	 so make sure we start with the LS bit zero.  */
+      c = INTVAL (op);
+      if (c & 1)
+	c = ~c;
+
+      /* Reject all zeros or all ones.  */
+      if (c == 0)
+	return 0;
+
+      /* Find the transition, and check that all bits above are 1's.  */
+      lsb = c & -c;
+      return c == -lsb;
+    }
+  else if (GET_CODE (op) == CONST_DOUBLE
+	   && (mode == VOIDmode || mode == DImode))
+    {
+      /* Two-host-word form of the same test (32-bit host): the single
+	 transition may sit in either the low or the high word.  */
+      HOST_WIDE_INT low, high, lsb;
+
+      if (HOST_BITS_PER_WIDE_INT < 64)
+	high = CONST_DOUBLE_HIGH (op);
+
+      low = CONST_DOUBLE_LOW (op);
+      if (low & 1)
+	{
+	  /* Normalize so the LS bit is zero, inverting both words.  */
+	  if (HOST_BITS_PER_WIDE_INT < 64)
+	    high = ~high;
+	  low = ~low;
+	}
+
+      if (low == 0)
+	{
+	  /* Transition must be in the high word; reject all-zero.  */
+	  if (HOST_BITS_PER_WIDE_INT >= 64 || high == 0)
+	    return 0;
+
+	  lsb = high & -high;
+	  return high == -lsb;
+	}
+
+      /* Transition in the low word; high word must then be all ones.  */
+      lsb = low & -low;
+      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
+    }
+  else
+    return 0;
+}
+
+/* Return 1 if the operand is either a non-special register or a constant
+   that can be used as the operand of a PowerPC64 logical AND insn.  */
+
+int
+and64_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  /* When CR0 is not available, andi./andis. (which clobber CR0) must
+     not be generated, so immediate forms other than rotate masks are
+     excluded.  */
+  if (fixed_regs[CR0_REGNO])
+    return gpc_reg_operand (op, mode) || mask64_operand (op, mode);
+
+  return logical_operand (op, mode) || mask64_operand (op, mode);
+}
+
+/* Return 1 if the operand is either a non-special register or a
+   constant that can be used as the operand of an RS/6000 logical
+   AND insn.  */
+
+int
+and_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  /* When CR0 is not available, andi./andis. (which clobber CR0) must
+     not be generated, so immediate forms other than rotate masks are
+     excluded.  */
+  if (fixed_regs[CR0_REGNO])
+    return gpc_reg_operand (op, mode) || mask_operand (op, mode);
+
+  return logical_operand (op, mode) || mask_operand (op, mode);
+}
+
+/* Return 1 if the operand is a general register or memory operand
+   (including volatile memory, which plain memory_operand rejects
+   during RTL generation).  */
+
+int
+reg_or_mem_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (gpc_reg_operand (op, mode))
+    return 1;
+  if (memory_operand (op, mode))
+    return 1;
+  return volatile_mem_operand (op, mode);
+}
+
+/* Return 1 if the operand is a general register or memory operand without
+   pre_inc or pre_dec which produces invalid form of PowerPC lwa
+   instruction.  */
+
+int
+lwa_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  rtx inner = op;
+
+  /* After reload a SUBREG of a hard register can be looked through.  */
+  if (reload_completed && GET_CODE (inner) == SUBREG)
+    inner = SUBREG_REG (inner);
+
+  /* lwa has no update (pre-inc/dec) form, and any constant
+     displacement must be a multiple of 4.  */
+  return gpc_reg_operand (inner, mode)
+    || (memory_operand (inner, mode)
+	&& GET_CODE (XEXP (inner, 0)) != PRE_INC
+	&& GET_CODE (XEXP (inner, 0)) != PRE_DEC
+	&& (GET_CODE (XEXP (inner, 0)) != PLUS
+	    || GET_CODE (XEXP (XEXP (inner, 0), 1)) != CONST_INT
+	    || INTVAL (XEXP (XEXP (inner, 0), 1)) % 4 == 0));
+}
+
+/* Return 1 if the operand, used inside a MEM, is a valid first argument
+   to CALL.  This is a SYMBOL_REF or a pseudo-register, which will be
+   forced to lr.  */
+
+int
+call_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  if (mode != VOIDmode && GET_MODE (op) != mode)
+    return 0;
+
+  if (GET_CODE (op) == SYMBOL_REF)
+    return 1;
+
+  return GET_CODE (op) == REG && REGNO (op) >= FIRST_PSEUDO_REGISTER;
+}
+
+/* Return 1 if the operand is a SYMBOL_REF for a function known to be in
+   this file and the function is not weakly defined.  */
+
+int
+current_file_function_operand (op, mode)
+     rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  if (GET_CODE (op) != SYMBOL_REF)
+    return 0;
+
+  if (SYMBOL_REF_FLAG (op))
+    return 1;
+
+  /* The current function itself qualifies, unless it is weak (a weak
+     definition may be overridden by another object file).  */
+  return (op == XEXP (DECL_RTL (current_function_decl), 0)
+          && ! DECL_WEAK (current_function_decl));
+}
+
+/* Return 1 if OP is a valid input for a move insn of mode MODE.  */
+
+int
+input_operand (op, mode)
+     rtx op;
+     enum machine_mode mode;
+{
+  /* Memory is always valid.  */
+  if (memory_operand (op, mode))
+    return 1;
+
+  /* Only a tiny bit of handling for CONSTANT_P_RTX is necessary.  */
+  if (GET_CODE (op) == CONSTANT_P_RTX)
+    return 1;
+
+  /* For floating-point, easy constants are valid.  */
+  if (GET_MODE_CLASS (mode) == MODE_FLOAT
+      && CONSTANT_P (op)
+      && easy_fp_constant (op, mode))
+    return 1;
+
+  /* Allow any integer constant.  */
+  if (GET_MODE_CLASS (mode) == MODE_INT
+      && (GET_CODE (op) == CONST_INT
+	  || GET_CODE (op) == CONST_DOUBLE))
+    return 1;
+
+  /* For floating-point or multi-word mode, the only remaining valid type
+     is a register.  */
+  if (GET_MODE_CLASS (mode) == MODE_FLOAT
+      || GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+    return register_operand (op, mode);
+
+  /* The only cases left are integral modes one word or smaller (we
+     do not get called for MODE_CC values).  These can be in any
+     register.  */
+  if (register_operand (op, mode))
+    return 1;
+
+  /* A SYMBOL_REF referring to the TOC is valid.  */
+  if (LEGITIMATE_CONSTANT_POOL_ADDRESS_P (op))
+    return 1;
+
+  /* A constant pool expression (relative to the TOC) is valid */
+  if (TOC_RELATIVE_EXPR_P (op))
+    return 1;
+
+  /* V.4 allows SYMBOL_REFs and CONSTs that are in the small data region
+     to be valid.  */
+  if (DEFAULT_ABI == ABI_V4
+      && (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST)
+      && small_data_operand (op, Pmode))
+    return 1;
+
+  return 0;
+}
+
+/* Return 1 for an operand in small memory on V.4/eabi: a SYMBOL_REF
+   (or SYMBOL_REF plus constant offset) referring to an object placed
+   in the small data area.  Always 0 on non-ELF targets.  */
+
+int
+small_data_operand (op, mode)
+     rtx op ATTRIBUTE_UNUSED;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+#if TARGET_ELF
+  rtx sym_ref;
+
+  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
+    return 0;
+
+  if (DEFAULT_ABI != ABI_V4)
+    return 0;
+
+  if (GET_CODE (op) == SYMBOL_REF)
+    sym_ref = op;
+
+  /* Anything other than (const (plus (symbol_ref) (const_int))) is
+     rejected here; the remaining else handles that exact shape.  */
+  else if (GET_CODE (op) != CONST
+	   || GET_CODE (XEXP (op, 0)) != PLUS
+	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
+	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
+    return 0;
+
+  else
+    {
+      rtx sum = XEXP (op, 0);
+      HOST_WIDE_INT summand;
+
+      /* We have to be careful here, because it is the referenced address
+	 that must be 32k from _SDA_BASE_, not just the symbol.  */
+      summand = INTVAL (XEXP (sum, 1));
+      if (summand < 0 || summand > g_switch_value)
+	return 0;
+
+      sym_ref = XEXP (sum, 0);
+    }
+
+  /* Small-data symbols are apparently tagged with a leading '@' in
+     their assembler name by this port -- TODO confirm against the
+     ENCODE_SECTION_INFO code elsewhere in the file.  */
+  if (*XSTR (sym_ref, 0) != '@')
+    return 0;
+
+  return 1;
+
+#else
+  return 0;
+#endif
+}
+
+/* Worker for constant_pool_expr_p and toc_relative_expr_p.  Walk OP
+   recursively, setting *HAVE_SYM when a constant-pool SYMBOL_REF whose
+   constant merits a special TOC entry is found, and *HAVE_TOC when the
+   TOC base label itself is found.  Return 1 if the whole expression is
+   built only from such symbols, CONST_INTs, and PLUS/MINUS/CONST.  */
+static int
+constant_pool_expr_1 (op, have_sym, have_toc)
+    rtx op;
+    int *have_sym;
+    int *have_toc;
+{
+  switch (GET_CODE(op))
+    {
+    case SYMBOL_REF:
+      if (CONSTANT_POOL_ADDRESS_P (op))
+	{
+	  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (op), Pmode))
+	    {
+	      *have_sym = 1;
+	      return 1;
+	    }
+	  else
+	    return 0;
+	}
+      /* The TOC base label (set up via toc_label_name) also counts.  */
+      else if (! strcmp (XSTR (op, 0), toc_label_name))
+	{
+	  *have_toc = 1;
+	  return 1;
+	}
+      else
+	return 0;
+    case PLUS:
+    case MINUS:
+      return constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc) &&
+	constant_pool_expr_1 (XEXP (op, 1), have_sym, have_toc);
+    case CONST:
+      return constant_pool_expr_1 (XEXP (op, 0), have_sym, have_toc);
+    case CONST_INT:
+      return 1;
+    default:
+      return 0;
+    }
+}
+
+/* Return 1 if OP is a TOC-legal expression that mentions at least one
+   special constant-pool symbol (see constant_pool_expr_1).  */
+int
+constant_pool_expr_p (op)
+    rtx op;
+{
+  int saw_sym = 0;
+  int saw_toc = 0;
+
+  if (! constant_pool_expr_1 (op, &saw_sym, &saw_toc))
+    return 0;
+
+  return saw_sym;
+}
+
+/* Return 1 if OP is a TOC-legal expression that mentions the TOC base
+   label (see constant_pool_expr_1).  */
+int
+toc_relative_expr_p (op)
+    rtx op;
+{
+  int saw_sym = 0;
+  int saw_toc = 0;
+
+  if (! constant_pool_expr_1 (op, &saw_sym, &saw_toc))
+    return 0;
+
+  return saw_toc;
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+   to be legitimate.  If we find one, return the new, valid address.
+   This is used from only one place: `memory_address' in explow.c.
+
+   OLDX is the address as it was before break_out_memory_refs was
+   called.  In some cases it is useful to look at this to decide what
+   needs to be done.
+
+   MODE is passed so that this function can use GO_IF_LEGITIMATE_ADDRESS.
+
+   It is always safe for this function to do nothing.  It exists to
+   recognize opportunities to optimize the output.
+
+   On RS/6000, first check for the sum of a register with a constant
+   integer that is out of range.  If so, generate code to add the
+   constant with the low-order 16 bits masked to the register and force
+   this result into another register (this can be done with `cau').
+   Then generate an address of REG+(CONST&0xffff), allowing for the
+   possibility of bit 16 being a one.
+
+   Then check for the sum of a register and something not constant, try to
+   load the other things into a register and return the sum.
+
+   Returns NULL_RTX when no transformation applies.  */
+rtx
+rs6000_legitimize_address (x, oldx, mode)
+     rtx x;
+     rtx oldx ATTRIBUTE_UNUSED;
+     enum machine_mode mode;
+{
+  /* reg + out-of-range constant: split into high and low parts,
+     rounding the low part into [-0x8000, 0x8000).  */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == REG
+      && GET_CODE (XEXP (x, 1)) == CONST_INT
+      && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000)
+    {
+      HOST_WIDE_INT high_int, low_int;
+      rtx sum;
+      high_int = INTVAL (XEXP (x, 1)) & (~ (HOST_WIDE_INT) 0xffff);
+      low_int = INTVAL (XEXP (x, 1)) & 0xffff;
+      /* If the low half would be negative as a 16-bit value, carry
+	 into the high half and sign-extend the low half.  */
+      if (low_int & 0x8000)
+	high_int += 0x10000, low_int |= ((HOST_WIDE_INT) -1) << 16;
+      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
+					 GEN_INT (high_int)), 0);
+      return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
+    }
+  /* reg + non-constant: force the other part into a register so the
+     sum is a valid indexed address (modes that cannot use indexed
+     addressing are excluded).  */
+  else if (GET_CODE (x) == PLUS
+	   && GET_CODE (XEXP (x, 0)) == REG
+	   && GET_CODE (XEXP (x, 1)) != CONST_INT
+	   && GET_MODE_NUNITS (mode) == 1
+	   && (TARGET_HARD_FLOAT || TARGET_POWERPC64 || mode != DFmode)
+	   && (TARGET_POWERPC64 || mode != DImode)
+	   && mode != TImode)
+    {
+      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
+			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
+    }
+  else if (ALTIVEC_VECTOR_MODE (mode))
+    {
+      rtx reg;
+
+      /* Make sure both operands are registers.  */
+      if (GET_CODE (x) == PLUS)
+	return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
+			     force_reg (Pmode, XEXP (x, 1)));
+
+      reg = force_reg (Pmode, x);
+      return reg;
+    }
+  /* ELF without a TOC: materialize a symbolic address with an
+     elf_high/lo_sum pair.  */
+  else if (TARGET_ELF && TARGET_32BIT && TARGET_NO_TOC && ! flag_pic
+	   && GET_CODE (x) != CONST_INT
+	   && GET_CODE (x) != CONST_DOUBLE
+	   && CONSTANT_P (x)
+	   && GET_MODE_NUNITS (mode) == 1
+	   && (GET_MODE_BITSIZE (mode) <= 32
+	       || (TARGET_HARD_FLOAT && mode == DFmode)))
+    {
+      rtx reg = gen_reg_rtx (Pmode);
+      emit_insn (gen_elf_high (reg, (x)));
+      return gen_rtx_LO_SUM (Pmode, reg, (x));
+    }
+  /* Darwin analogue of the previous case.  */
+  else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
+	   && ! flag_pic
+	   && GET_CODE (x) != CONST_INT
+	   && GET_CODE (x) != CONST_DOUBLE
+	   && CONSTANT_P (x)
+	   && (TARGET_HARD_FLOAT || mode != DFmode)
+	   && mode != DImode
+	   && mode != TImode)
+    {
+      rtx reg = gen_reg_rtx (Pmode);
+      emit_insn (gen_macho_high (reg, (x)));
+      return gen_rtx_LO_SUM (Pmode, reg, (x));
+    }
+  /* Constants that get their own TOC entry are addressed through it.  */
+  else if (TARGET_TOC
+	   && CONSTANT_POOL_EXPR_P (x)
+	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
+    {
+      return create_TOC_reference (x);
+    }
+  else
+    return NULL_RTX;
+}
+
+/* The convention appears to be to define this wherever it is used.
+ With legitimize_reload_address now defined here, REG_MODE_OK_FOR_BASE_P
+ is now used here. */
+#ifndef REG_MODE_OK_FOR_BASE_P
+#define REG_MODE_OK_FOR_BASE_P(REGNO, MODE) REG_OK_FOR_BASE_P (REGNO)
+#endif
+
+/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
+   replace the input X, or the original X if no replacement is called for.
+   The output parameter *WIN is 1 if the calling macro should goto WIN,
+   0 if it should not.
+
+   For RS/6000, we wish to handle large displacements off a base
+   register by splitting the addend across an addiu/addis and the mem insn.
+   This cuts number of extra insns needed from 3 to 1.
+
+   On Darwin, we use this to generate code for floating point constants.
+   A movsf_low is generated so we wind up with 2 instructions rather than 3.
+   The Darwin code is inside #if TARGET_MACHO because only then is
+   machopic_function_base_name() defined.  */
+rtx
+rs6000_legitimize_reload_address (x, mode, opnum, type, ind_levels, win)
+    rtx x;
+    enum machine_mode mode;
+    int opnum;
+    int type;
+    int ind_levels ATTRIBUTE_UNUSED;
+    int *win;
+{
+  /* We must recognize output that we have already generated ourselves:
+     (plus (plus (reg) (const_int)) (const_int)) -- reload the inner
+     sum into a base register.  */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == PLUS
+      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
+      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+      && GET_CODE (XEXP (x, 1)) == CONST_INT)
+    {
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+		   opnum, (enum reload_type)type);
+      *win = 1;
+      return x;
+    }
+#if TARGET_MACHO
+  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
+      && GET_CODE (x) == LO_SUM
+      && GET_CODE (XEXP (x, 0)) == PLUS
+      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
+      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
+      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 1), 0)) == CONST
+      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
+      && GET_CODE (XEXP (XEXP (x, 1), 0)) == MINUS
+      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 0)) == SYMBOL_REF
+      && GET_CODE (XEXP (XEXP (XEXP (x, 1), 0), 1)) == SYMBOL_REF)
+    {
+      /* Result of previous invocation of this function on Darwin
+	 floating point constant.  */
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+		   opnum, (enum reload_type)type);
+      *win = 1;
+      return x;
+    }
+#endif
+  /* Large displacement off a base register: split VAL into a
+     sign-extended 16-bit low part (kept in the mem) and a high part
+     (reloaded into the base register).  */
+  if (GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == REG
+      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
+      && REG_MODE_OK_FOR_BASE_P (XEXP (x, 0), mode)
+      && GET_CODE (XEXP (x, 1)) == CONST_INT)
+    {
+      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
+      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
+      HOST_WIDE_INT high
+	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
+
+      /* Check for 32-bit overflow.  */
+      if (high + low != val)
+	{
+	  *win = 0;
+	  return x;
+	}
+
+      /* Reload the high part into a base reg; leave the low part
+	 in the mem directly.  */
+
+      x = gen_rtx_PLUS (GET_MODE (x),
+			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
+				      GEN_INT (high)),
+			GEN_INT (low));
+
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
+		   opnum, (enum reload_type)type);
+      *win = 1;
+      return x;
+    }
+#if TARGET_MACHO
+  if (GET_CODE (x) == SYMBOL_REF
+      && DEFAULT_ABI == ABI_DARWIN
+      && flag_pic)
+    {
+      /* Darwin load of floating point constant.  Build the
+	 picbase-relative lo_sum form recognized by the first
+	 TARGET_MACHO block above.  */
+      rtx offset = gen_rtx (CONST, Pmode,
+			    gen_rtx (MINUS, Pmode, x,
+				     gen_rtx (SYMBOL_REF, Pmode,
+					      machopic_function_base_name ())));
+      x = gen_rtx (LO_SUM, GET_MODE (x),
+		   gen_rtx (PLUS, Pmode, pic_offset_table_rtx,
+			    gen_rtx (HIGH, Pmode, offset)), offset);
+      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
+		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
+		   opnum, (enum reload_type)type);
+      *win = 1;
+      return x;
+    }
+#endif
+  /* Constants with special TOC entries are replaced by TOC
+     references.  */
+  if (TARGET_TOC
+      && CONSTANT_POOL_EXPR_P (x)
+      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
+    {
+      (x) = create_TOC_reference (x);
+      *win = 1;
+      return x;
+    }
+  *win = 0;
+  return x;
+}
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+   that is a valid memory address for an instruction.
+   The MODE argument is the machine mode for the MEM expression
+   that wants to use this address.
+
+   On the RS/6000, there are four valid address: a SYMBOL_REF that
+   refers to a constant pool entry of an address (or the sum of it
+   plus a constant), a short (16-bit signed) constant plus a register,
+   the sum of two registers, or a register indirect, possibly with an
+   auto-increment.  For DFmode and DImode with an constant plus register,
+   we must ensure that both words are addressable or PowerPC64 with offset
+   word aligned.
+
+   For modes spanning multiple registers (DFmode in 32-bit GPRs,
+   32-bit DImode, TImode), indexed addressing cannot be used because
+   adjacent memory cells are accessed by adding word-sized offsets
+   during assembly output.  */
+int
+rs6000_legitimate_address (mode, x, reg_ok_strict)
+    enum machine_mode mode;
+    rtx x;
+    int reg_ok_strict;
+{
+  if (LEGITIMATE_INDIRECT_ADDRESS_P (x, reg_ok_strict))
+    return 1;
+  /* Pre-increment/decrement forms, when -mupdate is in effect.  */
+  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
+      && TARGET_UPDATE
+      && LEGITIMATE_INDIRECT_ADDRESS_P (XEXP (x, 0), reg_ok_strict))
+    return 1;
+  if (LEGITIMATE_SMALL_DATA_P (mode, x))
+    return 1;
+  if (LEGITIMATE_CONSTANT_POOL_ADDRESS_P (x))
+    return 1;
+  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
+  if (! reg_ok_strict
+      && GET_CODE (x) == PLUS
+      && GET_CODE (XEXP (x, 0)) == REG
+      && XEXP (x, 0) == virtual_stack_vars_rtx
+      && GET_CODE (XEXP (x, 1)) == CONST_INT)
+    return 1;
+  if (LEGITIMATE_OFFSET_ADDRESS_P (mode, x, reg_ok_strict))
+    return 1;
+  /* Indexed addressing, excluded for the multi-register modes
+     described in the header comment.  */
+  if (mode != TImode
+      && (TARGET_HARD_FLOAT || TARGET_POWERPC64 || mode != DFmode)
+      && (TARGET_POWERPC64 || mode != DImode)
+      && LEGITIMATE_INDEXED_ADDRESS_P (x, reg_ok_strict))
+    return 1;
+  if (LEGITIMATE_LO_SUM_ADDRESS_P (mode, x, reg_ok_strict))
+    return 1;
+  return 0;
+}
+
+/* Try to output insns to set TARGET equal to the constant C if it can
+ be done in less than N insns. Do all computations in MODE.
+ Returns the place where the output has been placed if it can be
+ done and the insns have been emitted. If it would take more than N
+ insns, zero is returned and no insns are emitted.
+
+ NOTE(review): N is ATTRIBUTE_UNUSED below -- as written this function
+ always emits a sequence and never returns zero; the head comment
+ describes the intended contract, not the current behavior. */
+
+rtx
+rs6000_emit_set_const (dest, mode, source, n)
+ rtx dest, source;
+ enum machine_mode mode;
+ int n ATTRIBUTE_UNUSED;
+{
+ HOST_WIDE_INT c0, c1;
+
+ /* Narrow modes fit in a single register; one SET insn suffices. */
+ if (mode == QImode || mode == HImode || mode == SImode)
+ {
+ if (dest == NULL)
+ dest = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, dest, source));
+ return dest;
+ }
+
+ /* Split the constant into low (c0) and high (c1) words; for a
+ CONST_INT the high word is the sign extension of the low word. */
+ if (GET_CODE (source) == CONST_INT)
+ {
+ c0 = INTVAL (source);
+ c1 = -(c0 < 0);
+ }
+ else if (GET_CODE (source) == CONST_DOUBLE)
+ {
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = -(c0 < 0);
+#else
+ c0 = CONST_DOUBLE_LOW (source);
+ c1 = CONST_DOUBLE_HIGH (source);
+#endif
+ }
+ else
+ abort ();
+
+ return rs6000_emit_set_long_const (dest, c0, c1);
+}
+
+/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
+ fall back to a straight forward decomposition. We do this to avoid
+ exponential run times encountered when looking for longer sequences
+ with rs6000_emit_set_const.
+
+ C1 is the low word of the constant, C2 the high word (ignored and
+ recomputed from C1 when the host has 64-bit HOST_WIDE_INT and the
+ target is PowerPC64). Returns DEST. */
+static rtx
+rs6000_emit_set_long_const (dest, c1, c2)
+ rtx dest;
+ HOST_WIDE_INT c1, c2;
+{
+ if (!TARGET_POWERPC64)
+ {
+ rtx operand1, operand2;
+
+ /* 32-bit target: set the two subwords of the DImode register
+ independently, low word first on little-endian. */
+ operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operand2 = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
+ DImode);
+ emit_move_insn (operand1, GEN_INT (c1));
+ emit_move_insn (operand2, GEN_INT (c2));
+ }
+ else
+ {
+ HOST_WIDE_INT ud1, ud2, ud3, ud4;
+
+ /* Slice the 64-bit value into four 16-bit pieces, ud1 lowest. */
+ ud1 = c1 & 0xffff;
+ ud2 = (c1 & 0xffff0000) >> 16;
+#if HOST_BITS_PER_WIDE_INT >= 64
+ c2 = c1 >> 32;
+#endif
+ ud3 = c2 & 0xffff;
+ ud4 = (c2 & 0xffff0000) >> 16;
+
+ /* Value fits in a sign-extended 16-bit immediate (one li). */
+ if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
+ {
+ if (ud1 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
+ else
+ emit_move_insn (dest, GEN_INT (ud1));
+ }
+
+ /* Value fits in a sign-extended 32-bit constant (lis [+ ori]). */
+ else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
+ || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
+ {
+ if (ud2 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud2 << 16));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ /* Upper 48 bits determined by ud3's sign: build the high 32 bits
+ then shift left 16 and OR in ud1. */
+ else if ((ud4 == 0xffff && (ud3 & 0x8000))
+ || (ud4 == 0 && ! (ud3 & 0x8000)))
+ {
+ if (ud3 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud3 << 16));
+
+ if (ud2 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud2)));
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (16)));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ /* General case: build the upper 32 bits, shift left 32, then OR
+ in the two low 16-bit pieces. */
+ else
+ {
+ if (ud4 & 0x8000)
+ emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
+ - 0x80000000));
+ else
+ emit_move_insn (dest, GEN_INT (ud4 << 16));
+
+ if (ud3 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud3)));
+
+ emit_move_insn (dest, gen_rtx_ASHIFT (DImode, dest, GEN_INT (32)));
+ if (ud2 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest,
+ GEN_INT (ud2 << 16)));
+ if (ud1 != 0)
+ emit_move_insn (dest, gen_rtx_IOR (DImode, dest, GEN_INT (ud1)));
+ }
+ }
+ return dest;
+}
+
+/* Emit a move from SOURCE to DEST in mode MODE.
+
+ This is the expander worker for the mov<mode> patterns: it massages
+ SOURCE into a form the target's move patterns accept (forcing hard
+ constants into the constant pool or TOC, legitimizing PIC references,
+ splitting slow unaligned DImode block copies) and then emits the SET. */
+void
+rs6000_emit_move (dest, source, mode)
+ rtx dest;
+ rtx source;
+ enum machine_mode mode;
+{
+ rtx operands[2];
+ operands[0] = dest;
+ operands[1] = source;
+
+ /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && ! FLOAT_MODE_P (mode)
+ && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
+ {
+ /* FIXME. This should never happen. */
+ /* Since it seems that it does, do the safe thing and convert
+ to a CONST_INT. */
+ operands[1] =
+ GEN_INT (trunc_int_for_mode (CONST_DOUBLE_LOW (operands[1]), mode));
+ }
+ /* A CONST_DOUBLE whose value would fit a CONST_INT is malformed RTL. */
+ if (GET_CODE (operands[1]) == CONST_DOUBLE
+ && ! FLOAT_MODE_P (mode)
+ && ((CONST_DOUBLE_HIGH (operands[1]) == 0
+ && CONST_DOUBLE_LOW (operands[1]) >= 0)
+ || (CONST_DOUBLE_HIGH (operands[1]) == -1
+ && CONST_DOUBLE_LOW (operands[1]) < 0)))
+ abort ();
+
+ /* Check if GCC is setting up a block move that will end up using FP
+ registers as temporaries. We must make sure this is acceptable. */
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM
+ && mode == DImode
+ && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
+ || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
+ && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
+ ? 32 : MEM_ALIGN (operands[0])))
+ || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
+ ? 32
+ : MEM_ALIGN (operands[1]))))
+ && ! MEM_VOLATILE_P (operands [0])
+ && ! MEM_VOLATILE_P (operands [1]))
+ {
+ /* Do the DImode mem-to-mem copy as two SImode moves instead. */
+ emit_move_insn (adjust_address (operands[0], SImode, 0),
+ adjust_address (operands[1], SImode, 0));
+ emit_move_insn (adjust_address (operands[0], SImode, 4),
+ adjust_address (operands[1], SImode, 4));
+ return;
+ }
+
+ /* Storing to memory: make sure the source is in a register first. */
+ if (! no_new_pseudos && GET_CODE (operands[0]) != REG)
+ operands[1] = force_reg (mode, operands[1]);
+
+ /* On pre-PowerPC (POWER) hard-float targets, an FP register may hold
+ double-precision data even in SFmode, so an SFmode store needs an
+ explicit truncation first. */
+ if (mode == SFmode && ! TARGET_POWERPC && TARGET_HARD_FLOAT
+ && GET_CODE (operands[0]) == MEM)
+ {
+ int regnum;
+
+ if (reload_in_progress || reload_completed)
+ regnum = true_regnum (operands[1]);
+ else if (GET_CODE (operands[1]) == REG)
+ regnum = REGNO (operands[1]);
+ else
+ regnum = -1;
+
+ /* If operands[1] is a register, on POWER it may have
+ double-precision data in it, so truncate it to single
+ precision. */
+ if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
+ {
+ rtx newreg;
+ newreg = (no_new_pseudos ? operands[1] : gen_reg_rtx (mode));
+ emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
+ operands[1] = newreg;
+ }
+ }
+
+ /* Handle the case where reload calls us with an invalid address;
+ and the case of CONSTANT_P_RTX. */
+ if (! general_operand (operands[1], mode)
+ || ! nonimmediate_operand (operands[0], mode)
+ || GET_CODE (operands[1]) == CONSTANT_P_RTX)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+
+ /* FIXME: In the long term, this switch statement should go away
+ and be replaced by a sequence of tests based on things like
+ mode == Pmode. */
+ switch (mode)
+ {
+ case HImode:
+ case QImode:
+ if (CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != CONST_INT)
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case TFmode:
+ case DFmode:
+ case SFmode:
+ /* FP constants that cannot be materialized cheaply go to memory. */
+ if (CONSTANT_P (operands[1])
+ && ! easy_fp_constant (operands[1], mode))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case V16QImode:
+ case V8HImode:
+ case V4SFmode:
+ case V4SImode:
+ /* fixme: aldyh -- allow vector constants when they are implemented. */
+ if (CONSTANT_P (operands[1]))
+ operands[1] = force_const_mem (mode, operands[1]);
+ break;
+
+ case SImode:
+ case DImode:
+ /* Use default pattern for address of ELF small data */
+ if (TARGET_ELF
+ && mode == Pmode
+ && DEFAULT_ABI == ABI_V4
+ && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)
+ && small_data_operand (operands[1], mode))
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+
+ /* SVR4 -fpic: load the address through the GOT. */
+ if (DEFAULT_ABI == ABI_V4
+ && mode == Pmode && mode == SImode
+ && flag_pic == 1 && got_operand (operands[1], mode))
+ {
+ emit_insn (gen_movsi_got (operands[0], operands[1]));
+ return;
+ }
+
+ /* No TOC and no PIC: synthesize the address with a high/low pair. */
+ if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
+ && TARGET_NO_TOC && ! flag_pic
+ && mode == Pmode
+ && CONSTANT_P (operands[1])
+ && GET_CODE (operands[1]) != HIGH
+ && GET_CODE (operands[1]) != CONST_INT)
+ {
+ rtx target = (no_new_pseudos ? operands[0] : gen_reg_rtx (mode));
+
+ /* If this is a function address on -mcall-aixdesc,
+ convert it to the address of the descriptor. */
+ if (DEFAULT_ABI == ABI_AIX
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && XSTR (operands[1], 0)[0] == '.')
+ {
+ const char *name = XSTR (operands[1], 0);
+ rtx new_ref;
+ while (*name == '.')
+ name++;
+ new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
+ CONSTANT_POOL_ADDRESS_P (new_ref)
+ = CONSTANT_POOL_ADDRESS_P (operands[1]);
+ SYMBOL_REF_FLAG (new_ref) = SYMBOL_REF_FLAG (operands[1]);
+ SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
+ operands[1] = new_ref;
+ }
+
+ if (DEFAULT_ABI == ABI_DARWIN)
+ {
+ emit_insn (gen_macho_high (target, operands[1]));
+ emit_insn (gen_macho_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ emit_insn (gen_elf_high (target, operands[1]));
+ emit_insn (gen_elf_low (operands[0], target, operands[1]));
+ return;
+ }
+
+ /* If this is a SYMBOL_REF that refers to a constant pool entry,
+ and we have put it in the TOC, we just need to make a TOC-relative
+ reference to it. */
+ if (TARGET_TOC
+ && GET_CODE (operands[1]) == SYMBOL_REF
+ && CONSTANT_POOL_EXPR_P (operands[1])
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
+ get_pool_mode (operands[1])))
+ {
+ operands[1] = create_TOC_reference (operands[1]);
+ }
+ /* Otherwise, a "hard" constant (multi-insn integer, symbolic
+ address, or anything headed for an FP register) is spilled to
+ the constant pool. */
+ else if (mode == Pmode
+ && CONSTANT_P (operands[1])
+ && ((GET_CODE (operands[1]) != CONST_INT
+ && ! easy_fp_constant (operands[1], mode))
+ || (GET_CODE (operands[1]) == CONST_INT
+ && num_insns_constant (operands[1], mode) > 2)
+ || (GET_CODE (operands[0]) == REG
+ && FP_REGNO_P (REGNO (operands[0]))))
+ && GET_CODE (operands[1]) != HIGH
+ && ! LEGITIMATE_CONSTANT_POOL_ADDRESS_P (operands[1])
+ && ! TOC_RELATIVE_EXPR_P (operands[1]))
+ {
+ /* Emit a USE operation so that the constant isn't deleted if
+ expensive optimizations are turned on because nobody
+ references it. This should only be done for operands that
+ contain SYMBOL_REFs with CONSTANT_POOL_ADDRESS_P set.
+ This should not be done for operands that contain LABEL_REFs.
+ For now, we just handle the obvious case. */
+ if (GET_CODE (operands[1]) != LABEL_REF)
+ emit_insn (gen_rtx_USE (VOIDmode, operands[1]));
+
+#if TARGET_MACHO
+ /* Darwin uses a special PIC legitimizer. */
+ if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
+ {
+ operands[1] =
+ rs6000_machopic_legitimize_pic_address (operands[1], mode,
+ operands[0]);
+ if (operands[0] != operands[1])
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+ }
+#endif
+
+ /* If we are to limit the number of things we put in the TOC and
+ this is a symbol plus a constant we can add in one insn,
+ just put the symbol in the TOC and add the constant. Don't do
+ this if reload is in progress. */
+ if (GET_CODE (operands[1]) == CONST
+ && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
+ && GET_CODE (XEXP (operands[1], 0)) == PLUS
+ && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
+ && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
+ || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
+ && ! side_effects_p (operands[0]))
+ {
+ rtx sym =
+ force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
+ rtx other = XEXP (XEXP (operands[1], 0), 1);
+
+ sym = force_reg (mode, sym);
+ if (mode == SImode)
+ emit_insn (gen_addsi3 (operands[0], sym, other));
+ else
+ emit_insn (gen_adddi3 (operands[0], sym, other));
+ return;
+ }
+
+ operands[1] = force_const_mem (mode, operands[1]);
+
+ /* If the pool entry went to the TOC, load through a TOC-relative
+ MEM marked read-only and in the TOC alias set. */
+ if (TARGET_TOC
+ && CONSTANT_POOL_EXPR_P (XEXP (operands[1], 0))
+ && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
+ get_pool_constant (XEXP (operands[1], 0)),
+ get_pool_mode (XEXP (operands[1], 0))))
+ {
+ operands[1]
+ = gen_rtx_MEM (mode,
+ create_TOC_reference (XEXP (operands[1], 0)));
+ set_mem_alias_set (operands[1], get_TOC_alias_set ());
+ RTX_UNCHANGING_P (operands[1]) = 1;
+ }
+ }
+ break;
+
+ case TImode:
+ /* TImode accesses must be register-indirect; force complex
+ addresses into a register on both sides. */
+ if (GET_CODE (operands[0]) == MEM
+ && GET_CODE (XEXP (operands[0], 0)) != REG
+ && ! reload_in_progress)
+ operands[0]
+ = replace_equiv_address (operands[0],
+ copy_addr_to_reg (XEXP (operands[0], 0)));
+
+ if (GET_CODE (operands[1]) == MEM
+ && GET_CODE (XEXP (operands[1], 0)) != REG
+ && ! reload_in_progress)
+ operands[1]
+ = replace_equiv_address (operands[1],
+ copy_addr_to_reg (XEXP (operands[1], 0)));
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Above, we may have called force_const_mem which may have returned
+ an invalid address. If we can, fix this up; otherwise, reload will
+ have to deal with it. */
+ if (GET_CODE (operands[1]) == MEM
+ && ! memory_address_p (mode, XEXP (operands[1], 0))
+ && ! reload_in_progress)
+ operands[1] = adjust_address (operands[1], mode, 0);
+
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
+ return;
+}
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0.
+
+ For incoming args we set the number of arguments in the prototype large
+ so we never return a PARALLEL. */
+
+void
+init_cumulative_args (cum, fntype, libname, incoming)
+ CUMULATIVE_ARGS *cum;
+ tree fntype;
+ rtx libname ATTRIBUTE_UNUSED;
+ int incoming;
+{
+ static CUMULATIVE_ARGS zero_cumulative;
+
+ /* Start from an all-zero state, then seed the register cursors. */
+ *cum = zero_cumulative;
+ cum->words = 0;
+ cum->fregno = FP_ARG_MIN_REG;
+ cum->vregno = ALTIVEC_ARG_MIN_REG;
+ cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
+ cum->call_cookie = CALL_NORMAL;
+ cum->sysv_gregno = GP_ARG_MIN_REG;
+
+ if (incoming)
+ cum->nargs_prototype = 1000; /* don't return a PARALLEL */
+
+ /* NOTE(review): the "- 1" presumably drops the trailing void_type_node
+ of a prototyped list, and the "+ (... BLKmode || RETURN_IN_MEMORY ...)"
+ accounts for a hidden aggregate-return pointer -- confirm against
+ the FUNCTION_ARG users before relying on this. */
+ else if (cum->prototype)
+ cum->nargs_prototype = (list_length (TYPE_ARG_TYPES (fntype)) - 1
+ + (TYPE_MODE (TREE_TYPE (fntype)) == BLKmode
+ || RETURN_IN_MEMORY (TREE_TYPE (fntype))));
+
+ else
+ cum->nargs_prototype = 0;
+
+ cum->orig_nargs = cum->nargs_prototype;
+
+ /* Check for longcall's */
+ if (fntype && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype)))
+ cum->call_cookie = CALL_LONG;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "\ninit_cumulative_args:");
+ if (fntype)
+ {
+ tree ret_type = TREE_TYPE (fntype);
+ fprintf (stderr, " ret code = %s,",
+ tree_code_name[ (int)TREE_CODE (ret_type) ]);
+ }
+
+ if (cum->call_cookie & CALL_LONG)
+ fprintf (stderr, " longcall,");
+
+ fprintf (stderr, " proto = %d, nargs = %d\n",
+ cum->prototype, cum->nargs_prototype);
+ }
+}
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding.
+
+ For the AIX ABI structs are always stored left shifted in their
+ argument slot. */
+
+enum direction
+function_arg_padding (mode, type)
+ enum machine_mode mode;
+ tree type;
+{
+ int narrower_than_slot;
+
+ /* Aggregates are left-justified (padded upward) in their slot. */
+ if (type != 0 && AGGREGATE_TYPE_P (type))
+ return upward;
+
+ /* Little-endian targets always pad upward (the default definition). */
+ if (! BYTES_BIG_ENDIAN)
+ return upward;
+
+ /* Big-endian: pad downward only when the argument is narrower than
+ a parameter slot. */
+ if (mode == BLKmode)
+ narrower_than_slot
+ = (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT));
+ else
+ narrower_than_slot = GET_MODE_BITSIZE (mode) < PARM_BOUNDARY;
+
+ return narrower_than_slot ? downward : upward;
+}
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments.
+
+ V.4 wants long longs to be double word aligned; AltiVec vectors are
+ aligned to 128 bits under the AltiVec ABI. */
+
+int
+function_arg_boundary (mode, type)
+ enum machine_mode mode;
+ tree type ATTRIBUTE_UNUSED;
+{
+ if (DEFAULT_ABI == ABI_V4 && (mode == DImode || mode == DFmode))
+ return 64;
+
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ return 128;
+
+ return PARM_BOUNDARY;
+}
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+void
+function_arg_advance (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ cum->nargs_prototype--;
+
+ /* AltiVec vectors consume a vector register while prototyped ones
+ remain, then fall back to stack words. */
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (cum->vregno <= ALTIVEC_ARG_MAX_REG && cum->nargs_prototype >= 0)
+ cum->vregno++;
+ else
+ cum->words += RS6000_ARG_SIZE (mode, type);
+ }
+ else if (DEFAULT_ABI == ABI_V4)
+ {
+ /* SVR4: floats go to FP registers f1..f8, then to the stack
+ (doubleword-aligned for DFmode). */
+ if (TARGET_HARD_FLOAT
+ && (mode == SFmode || mode == DFmode))
+ {
+ if (cum->fregno <= FP_ARG_V4_MAX_REG)
+ cum->fregno++;
+ else
+ {
+ if (mode == DFmode)
+ cum->words += cum->words & 1;
+ cum->words += RS6000_ARG_SIZE (mode, type);
+ }
+ }
+ else
+ {
+ int n_words;
+ int gregno = cum->sysv_gregno;
+
+ /* Aggregates and IEEE quad get passed by reference. */
+ if ((type && AGGREGATE_TYPE_P (type))
+ || mode == TFmode)
+ n_words = 1;
+ else
+ n_words = RS6000_ARG_SIZE (mode, type);
+
+ /* Long long is put in odd registers. */
+ if (n_words == 2 && (gregno & 1) == 0)
+ gregno += 1;
+
+ /* Long long is not split between registers and stack. */
+ if (gregno + n_words - 1 > GP_ARG_MAX_REG)
+ {
+ /* Long long is aligned on the stack. */
+ if (n_words == 2)
+ cum->words += cum->words & 1;
+ cum->words += n_words;
+ }
+
+ /* Note: continuing to accumulate gregno past when we've started
+ spilling to the stack indicates the fact that we've started
+ spilling to the stack to expand_builtin_saveregs. */
+ cum->sysv_gregno = gregno + n_words;
+ }
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
+ cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
+ fprintf (stderr, "mode = %4s, named = %d\n",
+ GET_MODE_NAME (mode), named);
+ }
+ }
+ else
+ {
+ /* AIX-style ABIs: 32-bit targets doubleword-align 64-bit args by
+ skipping a word; FP args also consume an FP register. */
+ int align = (TARGET_32BIT && (cum->words & 1) != 0
+ && function_arg_boundary (mode, type) == 64) ? 1 : 0;
+
+ cum->words += align + RS6000_ARG_SIZE (mode, type);
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT && TARGET_HARD_FLOAT)
+ cum->fregno++;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
+ cum->words, cum->fregno);
+ fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
+ cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
+ fprintf (stderr, "named = %d, align = %d\n", named, align);
+ }
+ }
+}
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. Under AIX, the first 13 FP args are in registers.
+ Under V.4, the first 8 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when TYPE is zero) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support PARALLEL anyway. */
+
+struct rtx_def *
+function_arg (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ enum rs6000_abi abi = DEFAULT_ABI;
+
+ /* Return a marker to indicate whether CR1 needs to set or clear the
+ bit that V.4 uses to say fp args were passed in registers.
+ Assume that we don't need the marker for software floating point,
+ or compiler generated library calls. */
+ if (mode == VOIDmode)
+ {
+ if (abi == ABI_V4
+ && TARGET_HARD_FLOAT
+ && cum->nargs_prototype < 0
+ && type && (cum->prototype || TARGET_NO_PROTOTYPE))
+ {
+ return GEN_INT (cum->call_cookie
+ | ((cum->fregno == FP_ARG_MIN_REG)
+ ? CALL_V4_SET_FP_ARGS
+ : CALL_V4_CLEAR_FP_ARGS));
+ }
+
+ return GEN_INT (cum->call_cookie);
+ }
+
+ /* Named AltiVec vector args go in vector registers; unnamed ones
+ (or after v13) go on the stack. */
+ if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
+ {
+ if (named && cum->vregno <= ALTIVEC_ARG_MAX_REG)
+ return gen_rtx_REG (mode, cum->vregno);
+ else
+ return NULL;
+ }
+ else if (abi == ABI_V4)
+ {
+ if (TARGET_HARD_FLOAT
+ && (mode == SFmode || mode == DFmode))
+ {
+ if (cum->fregno <= FP_ARG_V4_MAX_REG)
+ return gen_rtx_REG (mode, cum->fregno);
+ else
+ return NULL;
+ }
+ else
+ {
+ /* Mirrors the bookkeeping in function_arg_advance: compute
+ which GPR (if any) this argument would land in. */
+ int n_words;
+ int gregno = cum->sysv_gregno;
+
+ /* Aggregates and IEEE quad get passed by reference. */
+ if ((type && AGGREGATE_TYPE_P (type))
+ || mode == TFmode)
+ n_words = 1;
+ else
+ n_words = RS6000_ARG_SIZE (mode, type);
+
+ /* Long long is put in odd registers. */
+ if (n_words == 2 && (gregno & 1) == 0)
+ gregno += 1;
+
+ /* Long long is not split between registers and stack. */
+ if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
+ return gen_rtx_REG (mode, gregno);
+ else
+ return NULL;
+ }
+ }
+ else
+ {
+ int align = (TARGET_32BIT && (cum->words & 1) != 0
+ && function_arg_boundary (mode, type) == 64) ? 1 : 0;
+ int align_words = cum->words + align;
+
+ /* Variable-sized types always go on the stack. */
+ if (type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
+ return NULL_RTX;
+
+ if (USE_FP_FOR_ARG_P (*cum, mode, type))
+ {
+ if (! type
+ || ((cum->nargs_prototype > 0)
+ /* IBM AIX extended its linkage convention definition always
+ to require FP args after register save area hole on the
+ stack. */
+ && (DEFAULT_ABI != ABI_AIX
+ || ! TARGET_XL_CALL
+ || (align_words < GP_ARG_NUM_REG))))
+ return gen_rtx_REG (mode, cum->fregno);
+
+ /* Unprototyped FP arg: pass it in both the FP register and the
+ GPR(s)/stack slot so either kind of callee can find it. */
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ ((align_words >= GP_ARG_NUM_REG)
+ ? NULL_RTX
+ : (align_words
+ + RS6000_ARG_SIZE (mode, type)
+ > GP_ARG_NUM_REG
+ /* If this is partially on the stack, then
+ we only include the portion actually
+ in registers here. */
+ ? gen_rtx_REG (SImode,
+ GP_ARG_MIN_REG + align_words)
+ : gen_rtx_REG (mode,
+ GP_ARG_MIN_REG + align_words))),
+ const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ gen_rtx_REG (mode, cum->fregno),
+ const0_rtx)));
+ }
+ else if (align_words < GP_ARG_NUM_REG)
+ return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
+ else
+ return NULL_RTX;
+ }
+}
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+int
+function_arg_partial_nregs (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ /* V.4 never splits an argument between registers and memory. */
+ if (DEFAULT_ABI == ABI_V4)
+ return 0;
+
+ /* Prototyped FP/vector args live entirely in their register file. */
+ if ((USE_FP_FOR_ARG_P (*cum, mode, type)
+ || USE_ALTIVEC_FOR_ARG_P (*cum, mode, type))
+ && cum->nargs_prototype >= 0)
+ return 0;
+
+ /* The argument straddles the last GPR: report how many words of it
+ land in registers. */
+ if (cum->words < GP_ARG_NUM_REG
+ && cum->words + RS6000_ARG_SIZE (mode, type) > GP_ARG_NUM_REG)
+ {
+ int n_in_regs = GP_ARG_NUM_REG - cum->words;
+
+ if (n_in_regs && TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_partial_nregs: %d\n", n_in_regs);
+
+ return n_in_regs;
+ }
+
+ return 0;
+}
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type.
+
+ Under V.4, structures and unions are passed by reference. */
+
+int
+function_arg_pass_by_reference (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ /* Only the V.4 ABI ever passes by reference. */
+ if (DEFAULT_ABI != ABI_V4)
+ return 0;
+
+ /* Aggregates and IEEE quad long double are passed by reference. */
+ if ((type && AGGREGATE_TYPE_P (type)) || mode == TFmode)
+ {
+ if (TARGET_DEBUG_ARG)
+ fprintf (stderr, "function_arg_pass_by_reference: aggregate\n");
+
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Perform any needed actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+void
+setup_incoming_varargs (cum, mode, type, pretend_size, no_rtl)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int *pretend_size;
+ int no_rtl;
+
+{
+ CUMULATIVE_ARGS next_cum;
+ int reg_size = TARGET_32BIT ? 4 : 8;
+ rtx save_area = NULL_RTX, mem;
+ int first_reg_offset, set;
+ tree fntype;
+ int stdarg_p;
+
+ /* stdarg_p: the prototype ends with "..." after at least one named
+ argument (i.e. not an old-style varargs va_dcl function). */
+ fntype = TREE_TYPE (current_function_decl);
+ stdarg_p = (TYPE_ARG_TYPES (fntype) != 0
+ && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
+ != void_type_node));
+
+ /* For varargs, we do not want to skip the dummy va_dcl argument.
+ For stdargs, we do want to skip the last named argument. */
+ next_cum = *cum;
+ if (stdarg_p)
+ function_arg_advance (&next_cum, mode, type, 1);
+
+ if (DEFAULT_ABI == ABI_V4)
+ {
+ /* Indicate to allocate space on the stack for varargs save area. */
+ /* ??? Does this really have to be located at a magic spot on the
+ stack, or can we allocate this with assign_stack_local instead. */
+ cfun->machine->sysv_varargs_p = 1;
+ if (! no_rtl)
+ save_area = plus_constant (virtual_stack_vars_rtx,
+ - RS6000_VARARGS_SIZE);
+
+ first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
+ }
+ else
+ {
+ /* AIX-style: anonymous args live in the caller's parameter area. */
+ first_reg_offset = next_cum.words;
+ save_area = virtual_incoming_args_rtx;
+ cfun->machine->sysv_varargs_p = 0;
+
+ if (MUST_PASS_IN_STACK (mode, type))
+ first_reg_offset += RS6000_ARG_SIZE (TYPE_MODE (type), type);
+ }
+
+ /* Dump any GPRs holding anonymous arguments to the save area. */
+ set = get_varargs_alias_set ();
+ if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG)
+ {
+ /* Fixed: the original terminated this assignment with a stray
+ comma operator (",") instead of ";", silently gluing the
+ following set_mem_alias_set call into the same statement. */
+ mem = gen_rtx_MEM (BLKmode,
+ plus_constant (save_area,
+ first_reg_offset * reg_size));
+ set_mem_alias_set (mem, set);
+ set_mem_align (mem, BITS_PER_WORD);
+
+ move_block_from_reg
+ (GP_ARG_MIN_REG + first_reg_offset, mem,
+ GP_ARG_NUM_REG - first_reg_offset,
+ (GP_ARG_NUM_REG - first_reg_offset) * UNITS_PER_WORD);
+
+ /* ??? Does ABI_V4 need this at all? */
+ *pretend_size = (GP_ARG_NUM_REG - first_reg_offset) * UNITS_PER_WORD;
+ }
+
+ /* Save FP registers if needed. The store block is jumped over when
+ CR1 says no FP args were passed in registers (set at the call). */
+ if (DEFAULT_ABI == ABI_V4
+ && TARGET_HARD_FLOAT && ! no_rtl
+ && next_cum.fregno <= FP_ARG_V4_MAX_REG)
+ {
+ int fregno = next_cum.fregno;
+ rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
+ rtx lab = gen_label_rtx ();
+ int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG) * 8);
+
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_NE (VOIDmode, cr1,
+ const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode, lab),
+ pc_rtx)));
+
+ while (fregno <= FP_ARG_V4_MAX_REG)
+ {
+ mem = gen_rtx_MEM (DFmode, plus_constant (save_area, off));
+ set_mem_alias_set (mem, set);
+ emit_move_insn (mem, gen_rtx_REG (DFmode, fregno));
+ fregno++;
+ off += 8;
+ }
+
+ emit_label (lab);
+ }
+}
+
+/* Create the va_list data type.
+
+ For V.4 this is the SVR4 four-field record (gpr/fpr counters plus
+ overflow and register-save-area pointers), wrapped in a one-element
+ array so the type decays to a pointer; everything else uses char *. */
+
+tree
+rs6000_build_va_list ()
+{
+ tree f_gpr, f_fpr, f_ovf, f_sav, record, type_decl;
+
+ /* For AIX, prefer 'char *' because that's what the system
+ header files like. */
+ if (DEFAULT_ABI != ABI_V4)
+ return build_pointer_type (char_type_node);
+
+ record = make_lang_type (RECORD_TYPE);
+ type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);
+
+ /* gpr/fpr count how many argument registers have been consumed. */
+ f_gpr = build_decl (FIELD_DECL, get_identifier ("gpr"),
+ unsigned_char_type_node);
+ f_fpr = build_decl (FIELD_DECL, get_identifier ("fpr"),
+ unsigned_char_type_node);
+ f_ovf = build_decl (FIELD_DECL, get_identifier ("overflow_arg_area"),
+ ptr_type_node);
+ f_sav = build_decl (FIELD_DECL, get_identifier ("reg_save_area"),
+ ptr_type_node);
+
+ DECL_FIELD_CONTEXT (f_gpr) = record;
+ DECL_FIELD_CONTEXT (f_fpr) = record;
+ DECL_FIELD_CONTEXT (f_ovf) = record;
+ DECL_FIELD_CONTEXT (f_sav) = record;
+
+ /* Chain the fields in declaration order and lay the record out. */
+ TREE_CHAIN (record) = type_decl;
+ TYPE_NAME (record) = type_decl;
+ TYPE_FIELDS (record) = f_gpr;
+ TREE_CHAIN (f_gpr) = f_fpr;
+ TREE_CHAIN (f_fpr) = f_ovf;
+ TREE_CHAIN (f_ovf) = f_sav;
+
+ layout_type (record);
+
+ /* The correct type is an array type of one element. */
+ return build_array_type (record, build_index_type (size_zero_node));
+}
+
+/* Implement va_start.
+
+ For the V.4 ABI, initialize the four va_list fields from the counts
+ accumulated in current_function_args_info; other ABIs use the
+ standard pointer-bump expansion. */
+
+void
+rs6000_va_start (stdarg_p, valist, nextarg)
+ int stdarg_p;
+ tree valist;
+ rtx nextarg;
+{
+ HOST_WIDE_INT words, n_gpr, n_fpr;
+ tree f_gpr, f_fpr, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, t;
+
+ /* Only SVR4 needs something special. */
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ std_expand_builtin_va_start (stdarg_p, valist, nextarg);
+ return;
+ }
+
+ /* Field order must match rs6000_build_va_list. */
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_ovf = TREE_CHAIN (f_fpr);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
+ gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
+ fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
+ ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
+ sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
+
+ /* Count number of gp and fp argument registers used. */
+ words = current_function_args_info.words;
+ n_gpr = current_function_args_info.sysv_gregno - GP_ARG_MIN_REG;
+ n_fpr = current_function_args_info.fregno - FP_ARG_MIN_REG;
+
+ if (TARGET_DEBUG_ARG)
+ {
+ fputs ("va_start: words = ", stderr);
+ fprintf (stderr, HOST_WIDE_INT_PRINT_DEC, words);
+ fputs (", n_gpr = ", stderr);
+ fprintf (stderr, HOST_WIDE_INT_PRINT_DEC, n_gpr);
+ fputs (", n_fpr = ", stderr);
+ fprintf (stderr, HOST_WIDE_INT_PRINT_DEC, n_fpr);
+ putc ('\n', stderr);
+ }
+
+ /* Store the register counters into the va_list record. */
+ t = build (MODIFY_EXPR, TREE_TYPE (gpr), gpr, build_int_2 (n_gpr, 0));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ t = build (MODIFY_EXPR, TREE_TYPE (fpr), fpr, build_int_2 (n_fpr, 0));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Find the overflow area. */
+ t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
+ if (words != 0)
+ t = build (PLUS_EXPR, TREE_TYPE (ovf), t,
+ build_int_2 (words * UNITS_PER_WORD, 0));
+ t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ /* Find the register save area. */
+ t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
+ t = build (PLUS_EXPR, TREE_TYPE (sav), t,
+ build_int_2 (-RS6000_VARARGS_SIZE, -1));
+ t = build (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+}
+
+/* Implement va_arg. */
+
+rtx
+rs6000_va_arg (valist, type)
+ tree valist, type;
+{
+ tree f_gpr, f_fpr, f_ovf, f_sav;
+ tree gpr, fpr, ovf, sav, reg, t, u;
+ int indirect_p, size, rsize, n_reg, sav_ofs, sav_scale;
+ rtx lab_false, lab_over, addr_rtx, r;
+
+ /* For AIX, the rule is that structures are passed left-aligned in
+ their stack slot. However, GCC does not presently do this:
+ structures which are the same size as integer types are passed
+ right-aligned, as if they were in fact integers. This only
+ matters for structures of size 1 or 2, or 4 when TARGET_64BIT. */
+ if (DEFAULT_ABI != ABI_V4)
+ {
+ /* Non-V4 ABIs: va_list is a plain pointer walking the argument
+ area, so advance it by the rounded argument size and return the
+ (possibly right-adjusted) address of the value. */
+ HOST_WIDE_INT align, rounded_size;
+ enum machine_mode mode;
+ tree addr_tree;
+
+ /* Compute the rounded size of the type. */
+ align = PARM_BOUNDARY / BITS_PER_UNIT;
+ rounded_size = (((int_size_in_bytes (type) + align - 1) / align)
+ * align);
+
+ addr_tree = valist;
+
+ mode = TYPE_MODE (type);
+ if (mode != BLKmode)
+ {
+ /* Small non-BLKmode values sit right-aligned in their slot;
+ skip the leading padding (rounded_size - adj bytes). */
+ HOST_WIDE_INT adj;
+ adj = TREE_INT_CST_LOW (TYPE_SIZE (type)) / BITS_PER_UNIT;
+ if (rounded_size > align)
+ adj = rounded_size;
+
+ addr_tree = build (PLUS_EXPR, TREE_TYPE (addr_tree), addr_tree,
+ build_int_2 (rounded_size - adj, 0));
+ }
+
+ addr_rtx = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
+ addr_rtx = copy_to_reg (addr_rtx);
+
+ /* Compute new value for AP. */
+ t = build (MODIFY_EXPR, TREE_TYPE (valist), valist,
+ build (PLUS_EXPR, TREE_TYPE (valist), valist,
+ build_int_2 (rounded_size, 0)));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ return addr_rtx;
+ }
+
+ /* V4 ABI: va_list is a record; pick apart its four fields (gpr and
+ fpr counts, overflow area pointer, register save area pointer). */
+ f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
+ f_fpr = TREE_CHAIN (f_gpr);
+ f_ovf = TREE_CHAIN (f_fpr);
+ f_sav = TREE_CHAIN (f_ovf);
+
+ valist = build1 (INDIRECT_REF, TREE_TYPE (TREE_TYPE (valist)), valist);
+ gpr = build (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr);
+ fpr = build (COMPONENT_REF, TREE_TYPE (f_fpr), valist, f_fpr);
+ ovf = build (COMPONENT_REF, TREE_TYPE (f_ovf), valist, f_ovf);
+ sav = build (COMPONENT_REF, TREE_TYPE (f_sav), valist, f_sav);
+
+ /* rsize is the argument size rounded up to whole words. */
+ size = int_size_in_bytes (type);
+ rsize = (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+
+ if (AGGREGATE_TYPE_P (type) || TYPE_MODE (type) == TFmode)
+ {
+ /* Aggregates and long doubles are passed by reference. */
+ indirect_p = 1;
+ reg = gpr;
+ n_reg = 1;
+ sav_ofs = 0;
+ sav_scale = 4;
+ size = rsize = UNITS_PER_WORD;
+ }
+ else if (FLOAT_TYPE_P (type) && ! TARGET_SOFT_FLOAT)
+ {
+ /* FP args go in FP registers, if present. */
+ indirect_p = 0;
+ reg = fpr;
+ n_reg = 1;
+ sav_ofs = 8*4;
+ sav_scale = 8;
+ }
+ else
+ {
+ /* Otherwise into GP registers. */
+ indirect_p = 0;
+ reg = gpr;
+ n_reg = rsize;
+ sav_ofs = 0;
+ sav_scale = 4;
+ }
+
+ /* Pull the value out of the saved registers ... */
+
+ lab_false = gen_label_rtx ();
+ lab_over = gen_label_rtx ();
+ addr_rtx = gen_reg_rtx (Pmode);
+
+ /* If the register counter is already past the last usable register
+ (8 of each kind), take the overflow-area path instead. */
+ emit_cmp_and_jump_insns (expand_expr (reg, NULL_RTX, QImode, EXPAND_NORMAL),
+ GEN_INT (8 - n_reg + 1), GE, const1_rtx, QImode, 1,
+ lab_false);
+
+ /* Long long is aligned in the registers. */
+ if (n_reg > 1)
+ {
+ u = build (BIT_AND_EXPR, TREE_TYPE (reg), reg,
+ build_int_2 (n_reg - 1, 0));
+ u = build (PLUS_EXPR, TREE_TYPE (reg), reg, u);
+ u = build (MODIFY_EXPR, TREE_TYPE (reg), reg, u);
+ TREE_SIDE_EFFECTS (u) = 1;
+ expand_expr (u, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ if (sav_ofs)
+ t = build (PLUS_EXPR, ptr_type_node, sav, build_int_2 (sav_ofs, 0));
+ else
+ t = sav;
+
+ /* addr = sav + sav_ofs + reg * sav_scale, bumping reg by n_reg as a
+ side effect (POSTINCREMENT_EXPR). */
+ u = build (POSTINCREMENT_EXPR, TREE_TYPE (reg), reg, build_int_2 (n_reg, 0));
+ TREE_SIDE_EFFECTS (u) = 1;
+
+ u = build1 (CONVERT_EXPR, integer_type_node, u);
+ TREE_SIDE_EFFECTS (u) = 1;
+
+ u = build (MULT_EXPR, integer_type_node, u, build_int_2 (sav_scale, 0));
+ TREE_SIDE_EFFECTS (u) = 1;
+
+ t = build (PLUS_EXPR, ptr_type_node, t, u);
+ TREE_SIDE_EFFECTS (t) = 1;
+
+ r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
+ if (r != addr_rtx)
+ emit_move_insn (addr_rtx, r);
+
+ emit_jump_insn (gen_jump (lab_over));
+ emit_barrier ();
+ emit_label (lab_false);
+
+ /* ... otherwise out of the overflow area. */
+
+ /* Make sure we don't find reg 7 for the next int arg. */
+ if (n_reg > 1)
+ {
+ t = build (MODIFY_EXPR, TREE_TYPE (reg), reg, build_int_2 (8, 0));
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+ }
+
+ /* Care for on-stack alignment if needed. */
+ if (rsize <= 1)
+ t = ovf;
+ else
+ {
+ /* Round the overflow pointer up to an 8-byte boundary. */
+ t = build (PLUS_EXPR, TREE_TYPE (ovf), ovf, build_int_2 (7, 0));
+ t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-8, -1));
+ }
+ t = save_expr (t);
+
+ r = expand_expr (t, addr_rtx, Pmode, EXPAND_NORMAL);
+ if (r != addr_rtx)
+ emit_move_insn (addr_rtx, r);
+
+ /* Advance the overflow pointer past the argument just fetched. */
+ t = build (PLUS_EXPR, TREE_TYPE (t), t, build_int_2 (size, 0));
+ t = build (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
+ TREE_SIDE_EFFECTS (t) = 1;
+ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
+
+ emit_label (lab_over);
+
+ if (indirect_p)
+ {
+ /* By-reference argument: the slot holds a pointer to the value,
+ so load that pointer to get the real address. */
+ r = gen_rtx_MEM (Pmode, addr_rtx);
+ set_mem_alias_set (r, get_varargs_alias_set ());
+ emit_move_insn (addr_rtx, r);
+ }
+
+ return addr_rtx;
+}
+
+/* Builtins. */
+
+/* Register NAME with function code CODE and type TYPE as a
+ machine-dependent (BUILT_IN_MD) builtin, but only when the
+ target_flags bits in MASK (e.g. MASK_ALTIVEC) are enabled. */
+#define def_builtin(MASK, NAME, TYPE, CODE) \
+do { \
+ if ((MASK) & target_flags) \
+ builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, NULL); \
+} while (0)
+
+/* Describes one machine builtin: the target flag bits that enable it,
+ the insn pattern used to expand it, its source-level name, and the
+ function code stored in the builtin's FUNCTION_DECL. */
+struct builtin_description
+{
+ const unsigned int mask; /* Required target_flags bits (see def_builtin). */
+ const enum insn_code icode; /* Insn pattern used for expansion. */
+ const char *const name; /* "__builtin_altivec_*" name. */
+ const enum rs6000_builtins code; /* Builtin function code. */
+};
+
+/* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
+
+static const struct builtin_description bdesc_3arg[] =
+{
+ /* Multiply-add / multiply-sum family. */
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM},
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
+ /* vperm/vsel: one entry per element mode. */
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vperm_16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsel_16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
+ /* vsldoi: shift left double by octet immediate, per element mode. */
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },
+};
+
+/* DST operations: void foo (void *, const int, const char). */
+
+/* AltiVec data stream touch builtins: plain/transient (dst/dstt) and
+ store variants (dstst/dststt). */
+static const struct builtin_description bdesc_dst[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
+ { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT }
+};
+
+/* Simple binary operations: VECc = foo (VECa, VECb). */
+
+static const struct builtin_description bdesc_2arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_addv16qi3, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM },
+ { MASK_ALTIVEC, CODE_FOR_addv8hi3, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM },
+ { MASK_ALTIVEC, CODE_FOR_addv4si3, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM },
+ { MASK_ALTIVEC, CODE_FOR_addv4sf3, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddcuw, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddubs, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsbs, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduhs, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddshs, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vadduws, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vaddsws, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS },
+ { MASK_ALTIVEC, CODE_FOR_andv4si3, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vandc, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgub, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsb, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguh, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsh, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavguw, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vavgsw, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfux, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcfsx, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctsxs, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vctuxs, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS },
+ /* vmax*: fixed to use the max patterns; the vmaxuh/vmaxsh/vmaxuw/
+ vmaxsw/vmaxfp entries previously pointed at the min patterns
+ (uminv8hi3 etc.), making those builtins compute minimum. The
+ correct min patterns appear in the vmin* entries below. */
+ { MASK_ALTIVEC, CODE_FOR_umaxv16qi3, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB },
+ { MASK_ALTIVEC, CODE_FOR_smaxv16qi3, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB },
+ { MASK_ALTIVEC, CODE_FOR_umaxv8hi3, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH },
+ { MASK_ALTIVEC, CODE_FOR_smaxv8hi3, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH },
+ { MASK_ALTIVEC, CODE_FOR_umaxv4si3, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4si3, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW },
+ { MASK_ALTIVEC, CODE_FOR_smaxv4sf3, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghb, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghh, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrghw, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglb, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglh, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmrglw, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW },
+ { MASK_ALTIVEC, CODE_FOR_uminv16qi3, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB },
+ { MASK_ALTIVEC, CODE_FOR_sminv16qi3, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB },
+ { MASK_ALTIVEC, CODE_FOR_uminv8hi3, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH },
+ { MASK_ALTIVEC, CODE_FOR_sminv8hi3, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH },
+ { MASK_ALTIVEC, CODE_FOR_uminv4si3, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4si3, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW },
+ { MASK_ALTIVEC, CODE_FOR_sminv4sf3, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleub, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesb, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuleuh, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulesh, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmuloub, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosb, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulouh, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vmulosh, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vnor, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR },
+ { MASK_ALTIVEC, CODE_FOR_iorv4si3, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkpx, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhss, "__builtin_altivec_vpkuhss", ALTIVEC_BUILTIN_VPKUHSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshss, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwss, "__builtin_altivec_vpkuwss", ALTIVEC_BUILTIN_VPKUWSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswss, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuhus, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkshus, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkuwus, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vpkswus, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlb, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlh, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrlw, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslb, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslh, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslw, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsl, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vslo, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltb, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsplth, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltw, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsrb, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsrh, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsrw, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsrab, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsrah, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsraw, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsr, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsro, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO },
+ { MASK_ALTIVEC, CODE_FOR_subv16qi3, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM },
+ { MASK_ALTIVEC, CODE_FOR_subv8hi3, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM },
+ { MASK_ALTIVEC, CODE_FOR_subv4si3, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM },
+ { MASK_ALTIVEC, CODE_FOR_subv4sf3, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubcuw, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsububs, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsbs, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuhs, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubshs, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubuws, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsubsws, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4ubs, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4sbs, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum4shs, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsum2sws, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vsumsws, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS },
+ { MASK_ALTIVEC, CODE_FOR_xorv4si3, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR },
+ /* Predicate forms: set CR6 and return an integer result. */
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpbfp_p, "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpeqfp_p, "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequb_p, "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequh_p, "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpequw_p, "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgefp_p, "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtfp_p, "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsb_p, "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsh_p, "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtsw_p, "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtub_p, "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuh_p, "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vcmpgtuw_p, "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
+};
+
+/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
+ foo (VECa). */
+
+static const struct builtin_description bdesc_1arg[] =
+{
+ { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
+ /* vrfiz is round-toward-zero, so the generic truncation pattern
+ serves for it. */
+ { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
+ /* vspltis*: the single operand is a signed 5-bit literal. */
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
+ { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },
+};
+
+static rtx
+altivec_expand_unop_builtin (icode, arglist, target)
+ enum insn_code icode;
+ tree arglist;
+ rtx target;
+{
+ /* Expand a one-operand AltiVec builtin: emit ICODE (TARGET, ARG0)
+ and return TARGET, allocating a fresh pseudo when the caller's
+ suggested TARGET does not satisfy the output operand. */
+ tree arg0 = TREE_VALUE (arglist);
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ enum machine_mode result_mode = insn_data[icode].operand[0].mode;
+ enum machine_mode input_mode = insn_data[icode].operand[1].mode;
+ rtx insn;
+
+ /* Do not generate rtl for an erroneous argument. */
+ if (arg0 == error_mark_node)
+ return NULL_RTX;
+
+ /* Reuse TARGET only when its mode and predicate fit the insn. */
+ if (target == 0
+ || GET_MODE (target) != result_mode
+ || ! (*insn_data[icode].operand[0].predicate) (target, result_mode))
+ target = gen_reg_rtx (result_mode);
+
+ /* Force the input into a form the insn's predicate accepts. */
+ if (! (*insn_data[icode].operand[1].predicate) (op0, input_mode))
+ op0 = copy_to_mode_reg (input_mode, op0);
+
+ insn = GEN_FCN (icode) (target, op0);
+ if (insn == 0)
+ return 0;
+ emit_insn (insn);
+
+ return target;
+}
+static rtx
+altivec_expand_binop_builtin (icode, arglist, target)
+ enum insn_code icode;
+ tree arglist;
+ rtx target;
+{
+ /* Expand a two-operand AltiVec builtin: emit ICODE (TARGET, ARG0,
+ ARG1) and return TARGET, allocating a fresh pseudo when the
+ caller's suggestion does not satisfy the output operand. */
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ enum machine_mode result_mode = insn_data[icode].operand[0].mode;
+ enum machine_mode in0_mode = insn_data[icode].operand[1].mode;
+ enum machine_mode in1_mode = insn_data[icode].operand[2].mode;
+ rtx insn;
+
+ /* Do not generate rtl for erroneous arguments. */
+ if (arg0 == error_mark_node || arg1 == error_mark_node)
+ return NULL_RTX;
+
+ /* Reuse TARGET only when its mode and predicate fit the insn. */
+ if (target == 0
+ || GET_MODE (target) != result_mode
+ || ! (*insn_data[icode].operand[0].predicate) (target, result_mode))
+ target = gen_reg_rtx (result_mode);
+
+ /* Force each input into a form the insn's predicates accept. */
+ if (! (*insn_data[icode].operand[1].predicate) (op0, in0_mode))
+ op0 = copy_to_mode_reg (in0_mode, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, in1_mode))
+ op1 = copy_to_mode_reg (in1_mode, op1);
+
+ insn = GEN_FCN (icode) (target, op0, op1);
+ if (insn == 0)
+ return 0;
+ emit_insn (insn);
+
+ return target;
+}
+
+/* Expand an AltiVec store builtin with three source-level arguments
+ and no result. Note the permuted operand mapping: the builtin's
+ first argument (ARG0/OP0) becomes the insn's LAST operand and is
+ validated against operand[2], while ARG1/ARG2 map to the insn's
+ operands 0 and 1. */
+static rtx
+altivec_expand_stv_builtin (icode, arglist)
+ enum insn_code icode;
+ tree arglist;
+{
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ rtx pat;
+ enum machine_mode mode0 = insn_data[icode].operand[0].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[2].mode;
+
+ /* Invalid arguments. Bail before doing anything stoopid! */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return NULL_RTX;
+
+ /* Force each value into a form the corresponding insn operand's
+ predicate accepts (see the permuted mapping above). */
+ if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
+ op0 = copy_to_mode_reg (mode2, op0)
+ if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
+ op1 = copy_to_mode_reg (mode0, op1);
+ if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
+ op2 = copy_to_mode_reg (mode1, op2);
+
+ /* Stores produce no value; always return NULL_RTX. */
+ pat = GEN_FCN (icode) (op1, op2, op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+}
+
+/* Expand a three-operand AltiVec builtin: emit ICODE (TARGET, ARG0,
+ ARG1, ARG2) and return TARGET (a fresh pseudo when the caller's
+ suggestion does not satisfy the output operand's mode/predicate). */
+static rtx
+altivec_expand_ternop_builtin (icode, arglist, target)
+ enum insn_code icode;
+ tree arglist;
+ rtx target;
+{
+ rtx pat;
+ tree arg0 = TREE_VALUE (arglist);
+ tree arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ tree arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ rtx op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2].mode;
+ enum machine_mode mode2 = insn_data[icode].operand[3].mode;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return NULL_RTX;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ /* Force each input into a form the insn's predicates accept. */
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+ if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
+ op2 = copy_to_mode_reg (mode2, op2);
+
+ pat = GEN_FCN (icode) (target, op0, op1, op2);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+
+ return target;
+}
+static rtx
+altivec_expand_builtin (exp, target)
+ tree exp;
+ rtx target;
+{
+ struct builtin_description *d;
+ size_t i;
+ enum insn_code icode;
+ tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
+ tree arglist = TREE_OPERAND (exp, 1);
+ tree arg0, arg1, arg2;
+ rtx op0, op1, op2, pat;
+ enum machine_mode tmode, mode0, mode1, mode2;
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_lvx_16qi;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_lvx_8hi;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
+ icode = CODE_FOR_altivec_lvx_4si;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_lvx_4sf;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ tmode = insn_data[icode].operand[0].mode;
+ mode0 = insn_data[icode].operand[1].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
+ icode = CODE_FOR_altivec_stvx_16qi;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
+ icode = CODE_FOR_altivec_stvx_8hi;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
+ icode = CODE_FOR_altivec_stvx_4si;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
+ icode = CODE_FOR_altivec_stvx_4sf;
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+ mode1 = insn_data[icode].operand[1].mode;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
+ if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ pat = GEN_FCN (icode) (op0, op1);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_STVX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, arglist);
+ case ALTIVEC_BUILTIN_STVEBX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, arglist);
+ case ALTIVEC_BUILTIN_STVEHX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, arglist);
+ case ALTIVEC_BUILTIN_STVEWX:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, arglist);
+ case ALTIVEC_BUILTIN_STVXL:
+ return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, arglist);
+
+ case ALTIVEC_BUILTIN_MFVSCR:
+ icode = CODE_FOR_altivec_mfvscr;
+ tmode = insn_data[icode].operand[0].mode;
+
+ if (target == 0
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ pat = GEN_FCN (icode) (target);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+
+ case ALTIVEC_BUILTIN_MTVSCR:
+ icode = CODE_FOR_altivec_mtvscr;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return NULL_RTX;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (op0);
+ if (pat)
+ emit_insn (pat);
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSSALL:
+ emit_insn (gen_altivec_dssall ());
+ return NULL_RTX;
+
+ case ALTIVEC_BUILTIN_DSS:
+ icode = CODE_FOR_altivec_dss;
+ arg0 = TREE_VALUE (arglist);
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[icode].operand[0].mode;
+
+ /* If we got invalid arguments bail out before generating bad rtl. */
+ if (arg0 == error_mark_node)
+ return NULL_RTX;
+
+ if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ emit_insn (gen_altivec_dss (op0));
+ return NULL_RTX;
+ }
+
+ /* Handle DST variants. */
+ d = (struct builtin_description *) bdesc_dst;
+ for (i = 0; i < sizeof (bdesc_dst) / sizeof *d; i++, d++)
+ if (d->code == fcode)
+ {
+ arg0 = TREE_VALUE (arglist);
+ arg1 = TREE_VALUE (TREE_CHAIN (arglist));
+ arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
+ op0 = expand_expr (arg0, NULL_RTX, VOIDmode, 0);
+ op1 = expand_expr (arg1, NULL_RTX, VOIDmode, 0);
+ op2 = expand_expr (arg2, NULL_RTX, VOIDmode, 0);
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+
+ /* Invalid arguments, bail out before generating bad rtl. */
+ if (arg0 == error_mark_node
+ || arg1 == error_mark_node
+ || arg2 == error_mark_node)
+ return NULL_RTX;
+
+ if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ if (GET_CODE (op2) != CONST_INT || INTVAL (op2) > 3)
+ {
+ error ("argument 3 of `%s' must be a 2-bit literal", d->name);
+ return NULL_RTX;
+ }
+
+ pat = GEN_FCN (d->icode) (op0, op1, op2);
+ if (pat != 0)
+ emit_insn (pat);
+
+ return NULL_RTX;
+ }
+
+ /* Handle simple unary operations. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < sizeof (bdesc_1arg) / sizeof *d; i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_unop_builtin (d->icode, arglist, target);
+
+ /* Handle simple binary operations. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < sizeof (bdesc_2arg) / sizeof *d; i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_binop_builtin (d->icode, arglist, target);
+
+ /* LV* are funky. We initialized them differently. */
+ switch (fcode)
+ {
+ case ALTIVEC_BUILTIN_LVSL:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvsl,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVSR:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvsr,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEBX:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvebx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEHX:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvehx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVEWX:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvewx,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVXL:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvxl,
+ arglist, target);
+ case ALTIVEC_BUILTIN_LVX:
+ return altivec_expand_binop_builtin (CODE_FOR_altivec_lvx,
+ arglist, target);
+ default:
+ break;
+ /* Fall through. */
+ }
+
+ /* Handle simple ternary operations. */
+ d = (struct builtin_description *) bdesc_3arg;
+ for (i = 0; i < sizeof (bdesc_3arg) / sizeof *d; i++, d++)
+ if (d->code == fcode)
+ return altivec_expand_ternop_builtin (d->icode, arglist, target);
+
+ abort ();
+ return NULL_RTX;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+   with result going to TARGET if that's convenient
+   (and in mode MODE if that's convenient).
+   SUBTARGET may be used as the target for computing one of EXP's operands.
+   IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+rs6000_expand_builtin (exp, target, subtarget, mode, ignore)
+ tree exp;
+ rtx target;
+ rtx subtarget ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ int ignore ATTRIBUTE_UNUSED;
+{
+ if (TARGET_ALTIVEC)
+ return altivec_expand_builtin (exp, target);
+
+ /* Only AltiVec builtins are registered (see rs6000_init_builtins), so
+ reaching here without TARGET_ALTIVEC should be impossible. */
+ abort ();
+}
+
+/* Register the target-specific builtin functions.  Currently only the
+   AltiVec builtins exist, so this is a no-op unless AltiVec is enabled. */
+static void
+rs6000_init_builtins ()
+{
+ if (TARGET_ALTIVEC)
+ altivec_init_builtins ();
+}
+
+/* Build the function-type tree nodes needed by the AltiVec builtins and
+   register the builtins themselves: the load/store "internal" helpers,
+   the VSCR and data-stream operations, and every entry of the
+   bdesc_3arg, bdesc_dst, bdesc_2arg and bdesc_1arg tables.  The type
+   for each table entry is selected by matching the insn's operand
+   modes against the precomputed function types below. */
+static void
+altivec_init_builtins (void)
+{
+ struct builtin_description * d;
+ size_t i;
+
+ tree endlink = void_list_node;
+
+ tree pint_type_node = build_pointer_type (integer_type_node);
+ tree pvoid_type_node = build_pointer_type (void_type_node);
+ tree pshort_type_node = build_pointer_type (short_integer_type_node);
+ tree pchar_type_node = build_pointer_type (char_type_node);
+ tree pfloat_type_node = build_pointer_type (float_type_node);
+
+ tree v4sf_ftype_v4sf_v4sf_v16qi
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE,
+ V16QI_type_node,
+ endlink))));
+ tree v4si_ftype_v4si_v4si_v16qi
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE,
+ V16QI_type_node,
+ endlink))));
+ tree v8hi_ftype_v8hi_v8hi_v16qi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE,
+ V16QI_type_node,
+ endlink))));
+ tree v16qi_ftype_v16qi_v16qi_v16qi
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE,
+ V16QI_type_node,
+ endlink))));
+
+ /* V4SI foo (char). */
+ tree v4si_ftype_char
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, char_type_node, endlink));
+
+ /* V8HI foo (char). */
+ tree v8hi_ftype_char
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, char_type_node, endlink));
+
+ /* V16QI foo (char). */
+ tree v16qi_ftype_char
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, char_type_node, endlink));
+ /* V4SF foo (V4SF). */
+ tree v4sf_ftype_v4sf
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node, endlink));
+
+ /* V4SI foo (int *). */
+ tree v4si_ftype_pint
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, pint_type_node, endlink));
+ /* V8HI foo (short *). */
+ tree v8hi_ftype_pshort
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, pshort_type_node, endlink));
+ /* V16QI foo (char *). */
+ tree v16qi_ftype_pchar
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, pchar_type_node, endlink));
+ /* V4SF foo (float *). */
+ tree v4sf_ftype_pfloat
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, pfloat_type_node, endlink));
+
+ /* V8HI foo (V16QI). */
+ tree v8hi_ftype_v16qi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node, endlink));
+
+ /* void foo (void *, int, char/literal). */
+ tree void_ftype_pvoid_int_char
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pvoid_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ /* void foo (int *, V4SI). */
+ tree void_ftype_pint_v4si
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pint_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+ /* void foo (short *, V8HI). */
+ tree void_ftype_pshort_v8hi
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pshort_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink)));
+ /* void foo (char *, V16QI). */
+ tree void_ftype_pchar_v16qi
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pchar_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink)));
+ /* void foo (float *, V4SF). */
+ tree void_ftype_pfloat_v4sf
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, pfloat_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ endlink)));
+
+ /* void foo (V4SI). */
+ tree void_ftype_v4si
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink));
+
+ /* void foo (vint, int, void *). */
+ tree void_ftype_v4si_int_pvoid
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ pvoid_type_node,
+ endlink))));
+
+ /* void foo (vchar, int, void *). */
+ tree void_ftype_v16qi_int_pvoid
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ pvoid_type_node,
+ endlink))));
+
+ /* void foo (vshort, int, void *). */
+ tree void_ftype_v8hi_int_pvoid
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE,
+ pvoid_type_node,
+ endlink))));
+
+ /* void foo (char). */
+ tree void_ftype_qi
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink));
+
+ /* void foo (void). */
+ tree void_ftype_void
+ = build_function_type (void_type_node,
+ tree_cons (NULL_TREE, void_type_node,
+ endlink));
+
+ /* vshort foo (void). */
+ tree v8hi_ftype_void
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, void_type_node,
+ endlink));
+
+ tree v4si_ftype_v4si_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+
+ /* These are for the unsigned 5 bit literals. */
+
+ tree v4sf_ftype_v4si_char
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink)));
+ tree v4si_ftype_v4sf_char
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink)));
+ tree v4si_ftype_v4si_char
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink)));
+ tree v8hi_ftype_v8hi_char
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink)));
+ tree v16qi_ftype_v16qi_char
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, char_type_node,
+ endlink)));
+
+ /* These are for the unsigned 4 bit literals. */
+
+ tree v16qi_ftype_v16qi_v16qi_char
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree v8hi_ftype_v8hi_v8hi_char
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree v4si_ftype_v4si_v4si_char
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ tree v4sf_ftype_v4sf_v4sf_char
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE,
+ char_type_node,
+ endlink))));
+
+ /* End of 4 bit literals. */
+
+ tree v4sf_ftype_v4sf_v4sf
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ endlink)));
+ tree v4sf_ftype_v4sf_v4sf_v4si
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE,
+ V4SI_type_node,
+ endlink))));
+ tree v4sf_ftype_v4sf_v4sf_v4sf
+ = build_function_type (V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE,
+ V4SF_type_node,
+ endlink))));
+ tree v4si_ftype_v4si_v4si_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE,
+ V4SI_type_node,
+ endlink))));
+
+ tree v8hi_ftype_v8hi_v8hi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink)));
+ tree v8hi_ftype_v8hi_v8hi_v8hi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE,
+ V8HI_type_node,
+ endlink))));
+ tree v4si_ftype_v8hi_v8hi_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE,
+ V4SI_type_node,
+ endlink))));
+ tree v4si_ftype_v16qi_v16qi_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE,
+ V4SI_type_node,
+ endlink))));
+
+ tree v16qi_ftype_v16qi_v16qi
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v4sf_v4sf
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ endlink)));
+
+ tree v8hi_ftype_v16qi_v16qi
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v8hi_v8hi
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink)));
+
+ tree v8hi_ftype_v4si_v4si
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+
+ tree v16qi_ftype_v8hi_v8hi
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v16qi_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v16qi_v16qi
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v8hi_v4si
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+
+ tree v4si_ftype_v8hi
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node, endlink));
+
+ tree int_ftype_v4si_v4si
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ tree_cons (NULL_TREE, V4SI_type_node,
+ endlink)));
+
+ tree int_ftype_v4sf_v4sf
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ tree_cons (NULL_TREE, V4SF_type_node,
+ endlink)));
+
+ tree int_ftype_v16qi_v16qi
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ tree_cons (NULL_TREE, V16QI_type_node,
+ endlink)));
+
+ tree v16qi_ftype_int_pvoid
+ = build_function_type (V16QI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, pvoid_type_node,
+ endlink)));
+
+ tree v4si_ftype_int_pvoid
+ = build_function_type (V4SI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, pvoid_type_node,
+ endlink)));
+
+ tree v8hi_ftype_int_pvoid
+ = build_function_type (V8HI_type_node,
+ tree_cons (NULL_TREE, integer_type_node,
+ tree_cons (NULL_TREE, pvoid_type_node,
+ endlink)));
+
+ tree int_ftype_v8hi_v8hi
+ = build_function_type (integer_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ tree_cons (NULL_TREE, V8HI_type_node,
+ endlink)));
+
+ /* Register the builtins that are expanded by special cases in
+ altivec_expand_builtin rather than via the bdesc_* tables. */
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pfloat, ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf, ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pint, ALTIVEC_BUILTIN_LD_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si, ALTIVEC_BUILTIN_ST_INTERNAL_4si);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pshort, ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi, ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pchar, ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi, ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_qi, ALTIVEC_BUILTIN_DSS);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_int_pvoid, ALTIVEC_BUILTIN_LVSL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_int_pvoid, ALTIVEC_BUILTIN_LVSR);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_int_pvoid, ALTIVEC_BUILTIN_LVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_int_pvoid, ALTIVEC_BUILTIN_LVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_int_pvoid, ALTIVEC_BUILTIN_LVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_int_pvoid, ALTIVEC_BUILTIN_LVXL);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_int_pvoid, ALTIVEC_BUILTIN_LVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_int_pvoid, ALTIVEC_BUILTIN_STVX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_int_pvoid, ALTIVEC_BUILTIN_STVEBX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_int_pvoid, ALTIVEC_BUILTIN_STVEHX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_int_pvoid, ALTIVEC_BUILTIN_STVEWX);
+ def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_int_pvoid, ALTIVEC_BUILTIN_STVXL);
+
+ /* Add the simple ternary operators.  The function type for each
+ entry is chosen by matching the four operand modes of its insn. */
+ d = (struct builtin_description *) bdesc_3arg;
+ for (i = 0; i < sizeof (bdesc_3arg) / sizeof *d; i++, d++)
+ {
+
+ enum machine_mode mode0, mode1, mode2, mode3;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+ mode3 = insn_data[d->icode].operand[3].mode;
+
+ /* When all four are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
+ {
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v4sf;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v8hi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ abort();
+ }
+ }
+ else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
+ {
+ switch (mode0)
+ {
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si_v16qi;
+ break;
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi_v16qi;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi_v16qi;
+ break;
+ default:
+ abort();
+ }
+ }
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v16qi_v16qi_v4si;
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
+ && mode3 == V4SImode)
+ type = v4si_ftype_v8hi_v8hi_v4si;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
+ && mode3 == V4SImode)
+ type = v4sf_ftype_v4sf_v4sf_v4si;
+
+ /* vchar, vchar, vchar, 4 bit literal. */
+ else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v16qi_ftype_v16qi_v16qi_char;
+
+ /* vshort, vshort, vshort, 4 bit literal. */
+ else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v8hi_ftype_v8hi_v8hi_char;
+
+ /* vint, vint, vint, 4 bit literal. */
+ else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4si_ftype_v4si_v4si_char;
+
+ /* vfloat, vfloat, vfloat, 4 bit literal. */
+ else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
+ && mode3 == QImode)
+ type = v4sf_ftype_v4sf_v4sf_char;
+
+ else
+ abort ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the DST variants.  All share one signature. */
+ d = (struct builtin_description *) bdesc_dst;
+ for (i = 0; i < sizeof (bdesc_dst) / sizeof *d; i++, d++)
+ def_builtin (d->mask, d->name, void_ftype_pvoid_int_char, d->code);
+
+ /* Add the simple binary operators. */
+ d = (struct builtin_description *) bdesc_2arg;
+ for (i = 0; i < sizeof (bdesc_2arg) / sizeof *d; i++, d++)
+ {
+ enum machine_mode mode0, mode1, mode2;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+ mode2 = insn_data[d->icode].operand[2].mode;
+
+ /* When all three operands are of the same mode. */
+ if (mode0 == mode1 && mode1 == mode2)
+ {
+ switch (mode0)
+ {
+ case V4SFmode:
+ type = v4sf_ftype_v4sf_v4sf;
+ break;
+ case V4SImode:
+ type = v4si_ftype_v4si_v4si;
+ break;
+ case V16QImode:
+ type = v16qi_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = v8hi_ftype_v8hi_v8hi;
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ /* A few other combos we really don't want to do manually. */
+
+ /* vint, vfloat, vfloat. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
+ type = v4si_ftype_v4sf_v4sf;
+
+ /* vshort, vchar, vchar. */
+ else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v8hi_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vshort. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v4si_ftype_v8hi_v8hi;
+
+ /* vshort, vint, vint. */
+ else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
+ type = v8hi_ftype_v4si_v4si;
+
+ /* vchar, vshort, vshort. */
+ else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
+ type = v16qi_ftype_v8hi_v8hi;
+
+ /* vint, vchar, vint. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
+ type = v4si_ftype_v16qi_v4si;
+
+ /* vint, vchar, vchar. */
+ else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
+ type = v4si_ftype_v16qi_v16qi;
+
+ /* vint, vshort, vint. */
+ else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
+ type = v4si_ftype_v8hi_v4si;
+
+ /* vint, vint, 5 bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
+ type = v4si_ftype_v4si_char;
+
+ /* vshort, vshort, 5 bit literal. */
+ else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
+ type = v8hi_ftype_v8hi_char;
+
+ /* vchar, vchar, 5 bit literal. */
+ else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
+ type = v16qi_ftype_v16qi_char;
+
+ /* vfloat, vint, 5 bit literal. */
+ else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
+ type = v4sf_ftype_v4si_char;
+
+ /* vint, vfloat, 5 bit literal. */
+ else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
+ type = v4si_ftype_v4sf_char;
+
+ /* int, x, x. */
+ else if (mode0 == SImode)
+ {
+ switch (mode1)
+ {
+ case V4SImode:
+ type = int_ftype_v4si_v4si;
+ break;
+ case V4SFmode:
+ type = int_ftype_v4sf_v4sf;
+ break;
+ case V16QImode:
+ type = int_ftype_v16qi_v16qi;
+ break;
+ case V8HImode:
+ type = int_ftype_v8hi_v8hi;
+ break;
+ default:
+ abort ();
+ }
+ }
+
+ else
+ abort ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+
+ /* Add the simple unary operators. */
+ d = (struct builtin_description *) bdesc_1arg;
+ for (i = 0; i < sizeof (bdesc_1arg) / sizeof *d; i++, d++)
+ {
+ enum machine_mode mode0, mode1;
+ tree type;
+
+ if (d->name == 0)
+ continue;
+
+ mode0 = insn_data[d->icode].operand[0].mode;
+ mode1 = insn_data[d->icode].operand[1].mode;
+
+ if (mode0 == V4SImode && mode1 == QImode)
+ type = v4si_ftype_char;
+ else if (mode0 == V8HImode && mode1 == QImode)
+ type = v8hi_ftype_char;
+ else if (mode0 == V16QImode && mode1 == QImode)
+ type = v16qi_ftype_char;
+ else if (mode0 == V4SFmode && mode1 == V4SFmode)
+ type = v4sf_ftype_v4sf;
+ else if (mode0 == V8HImode && mode1 == V16QImode)
+ type = v8hi_ftype_v16qi;
+ else if (mode0 == V4SImode && mode1 == V8HImode)
+ type = v4si_ftype_v8hi;
+ else
+ abort ();
+
+ def_builtin (d->mask, d->name, type, d->code);
+ }
+}
+
+
+/* Generate a memory reference for expand_block_move, copying volatile,
+   and other bits from an original memory reference. */
+
+static rtx
+expand_block_move_mem (mode, addr, orig_mem)
+ enum machine_mode mode;
+ rtx addr;
+ rtx orig_mem;
+{
+ rtx mem = gen_rtx_MEM (mode, addr);
+
+ /* Carry over attribute bits (e.g. volatility) from ORIG_MEM so the
+ new reference is treated like the one it was derived from. */
+ MEM_COPY_ATTRIBUTES (mem, orig_mem);
+ return mem;
+}
+
+/* Expand a block move operation, and return 1 if successful.  Return 0
+   if we should let the compiler generate normal code.
+
+   operands[0] is the destination
+   operands[1] is the source
+   operands[2] is the length
+   operands[3] is the alignment */
+
+/* Number of load/store pairs batched before the stores are flushed in
+   the non-string-instruction path below. */
+#define MAX_MOVE_REG 4
+
+int
+expand_block_move (operands)
+ rtx operands[];
+{
+ rtx orig_dest = operands[0];
+ rtx orig_src = operands[1];
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ int constp = (GET_CODE (bytes_rtx) == CONST_INT);
+ int align;
+ int bytes;
+ int offset;
+ int num_reg;
+ int i;
+ rtx src_reg;
+ rtx dest_reg;
+ rtx src_addr;
+ rtx dest_addr;
+ rtx tmp_reg;
+ rtx stores[MAX_MOVE_REG];
+ int move_bytes;
+
+ /* If this is not a fixed size move, just call memcpy */
+ if (! constp)
+ return 0;
+
+ /* If this is not a fixed size alignment, abort */
+ if (GET_CODE (align_rtx) != CONST_INT)
+ abort ();
+ align = INTVAL (align_rtx);
+
+ /* Anything to move? */
+ bytes = INTVAL (bytes_rtx);
+ if (bytes <= 0)
+ return 1;
+
+ /* store_one_arg depends on expand_block_move to handle at least the size of
+ reg_parm_stack_space. */
+ if (bytes > (TARGET_POWERPC64 ? 64 : 32))
+ return 0;
+
+ /* Move the address into scratch registers. */
+ dest_reg = copy_addr_to_reg (XEXP (orig_dest, 0));
+ src_reg = copy_addr_to_reg (XEXP (orig_src, 0));
+
+ if (TARGET_STRING) /* string instructions are available */
+ {
+ /* Each iteration emits one move of up to 32 bytes, picking the
+ widest string/scalar move whose scratch registers (r5-r12) are
+ all available. */
+ for ( ; bytes > 0; bytes -= move_bytes)
+ {
+ if (bytes > 24 /* move up to 32 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10]
+ && ! fixed_regs[11]
+ && ! fixed_regs[12])
+ {
+ /* The 8-register form encodes a 32-byte count as 0. */
+ move_bytes = (bytes > 32) ? 32 : bytes;
+ emit_insn (gen_movstrsi_8reg (expand_block_move_mem (BLKmode,
+ dest_reg,
+ orig_dest),
+ expand_block_move_mem (BLKmode,
+ src_reg,
+ orig_src),
+ GEN_INT ((move_bytes == 32)
+ ? 0 : move_bytes),
+ align_rtx));
+ }
+ else if (bytes > 16 /* move up to 24 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8]
+ && ! fixed_regs[9]
+ && ! fixed_regs[10])
+ {
+ move_bytes = (bytes > 24) ? 24 : bytes;
+ emit_insn (gen_movstrsi_6reg (expand_block_move_mem (BLKmode,
+ dest_reg,
+ orig_dest),
+ expand_block_move_mem (BLKmode,
+ src_reg,
+ orig_src),
+ GEN_INT (move_bytes),
+ align_rtx));
+ }
+ else if (bytes > 8 /* move up to 16 bytes at a time */
+ && ! fixed_regs[5]
+ && ! fixed_regs[6]
+ && ! fixed_regs[7]
+ && ! fixed_regs[8])
+ {
+ move_bytes = (bytes > 16) ? 16 : bytes;
+ emit_insn (gen_movstrsi_4reg (expand_block_move_mem (BLKmode,
+ dest_reg,
+ orig_dest),
+ expand_block_move_mem (BLKmode,
+ src_reg,
+ orig_src),
+ GEN_INT (move_bytes),
+ align_rtx));
+ }
+ else if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 8 || (! STRICT_ALIGNMENT && align >= 4)))
+ {
+ move_bytes = 8;
+ tmp_reg = gen_reg_rtx (DImode);
+ emit_move_insn (tmp_reg,
+ expand_block_move_mem (DImode,
+ src_reg, orig_src));
+ emit_move_insn (expand_block_move_mem (DImode,
+ dest_reg, orig_dest),
+ tmp_reg);
+ }
+ else if (bytes > 4 && !TARGET_POWERPC64)
+ { /* move up to 8 bytes at a time */
+ move_bytes = (bytes > 8) ? 8 : bytes;
+ emit_insn (gen_movstrsi_2reg (expand_block_move_mem (BLKmode,
+ dest_reg,
+ orig_dest),
+ expand_block_move_mem (BLKmode,
+ src_reg,
+ orig_src),
+ GEN_INT (move_bytes),
+ align_rtx));
+ }
+ else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
+ { /* move 4 bytes */
+ move_bytes = 4;
+ tmp_reg = gen_reg_rtx (SImode);
+ emit_move_insn (tmp_reg,
+ expand_block_move_mem (SImode,
+ src_reg, orig_src));
+ emit_move_insn (expand_block_move_mem (SImode,
+ dest_reg, orig_dest),
+ tmp_reg);
+ }
+ else if (bytes == 2 && (align >= 2 || ! STRICT_ALIGNMENT))
+ { /* move 2 bytes */
+ move_bytes = 2;
+ tmp_reg = gen_reg_rtx (HImode);
+ emit_move_insn (tmp_reg,
+ expand_block_move_mem (HImode,
+ src_reg, orig_src));
+ emit_move_insn (expand_block_move_mem (HImode,
+ dest_reg, orig_dest),
+ tmp_reg);
+ }
+ else if (bytes == 1) /* move 1 byte */
+ {
+ move_bytes = 1;
+ tmp_reg = gen_reg_rtx (QImode);
+ emit_move_insn (tmp_reg,
+ expand_block_move_mem (QImode,
+ src_reg, orig_src));
+ emit_move_insn (expand_block_move_mem (QImode,
+ dest_reg, orig_dest),
+ tmp_reg);
+ }
+ else
+ { /* move up to 4 bytes at a time */
+ move_bytes = (bytes > 4) ? 4 : bytes;
+ emit_insn (gen_movstrsi_1reg (expand_block_move_mem (BLKmode,
+ dest_reg,
+ orig_dest),
+ expand_block_move_mem (BLKmode,
+ src_reg,
+ orig_src),
+ GEN_INT (move_bytes),
+ align_rtx));
+ }
+
+ /* Advance the scratch address registers past the chunk just
+ moved, if more remains. */
+ if (bytes > move_bytes)
+ {
+ if (! TARGET_POWERPC64)
+ {
+ emit_insn (gen_addsi3 (src_reg, src_reg,
+ GEN_INT (move_bytes)));
+ emit_insn (gen_addsi3 (dest_reg, dest_reg,
+ GEN_INT (move_bytes)));
+ }
+ else
+ {
+ emit_insn (gen_adddi3 (src_reg, src_reg,
+ GEN_INT (move_bytes)));
+ emit_insn (gen_adddi3 (dest_reg, dest_reg,
+ GEN_INT (move_bytes)));
+ }
+ }
+ }
+ }
+
+ else /* string instructions not available */
+ {
+ num_reg = offset = 0;
+ for ( ; bytes > 0; (bytes -= move_bytes), (offset += move_bytes))
+ {
+ /* Calculate the correct offset for src/dest */
+ if (offset == 0)
+ {
+ src_addr = src_reg;
+ dest_addr = dest_reg;
+ }
+ else
+ {
+ src_addr = plus_constant (src_reg, offset);
+ dest_addr = plus_constant (dest_reg, offset);
+ }
+
+ /* Generate the appropriate load and store, saving the stores
+ for later. */
+ if (bytes >= 8 && TARGET_POWERPC64
+ /* 64-bit loads and stores require word-aligned
+ displacements. */
+ && (align >= 8 || (! STRICT_ALIGNMENT && align >= 4)))
+ {
+ move_bytes = 8;
+ tmp_reg = gen_reg_rtx (DImode);
+ emit_insn (gen_movdi (tmp_reg,
+ expand_block_move_mem (DImode,
+ src_addr,
+ orig_src)));
+ stores[num_reg++] = gen_movdi (expand_block_move_mem (DImode,
+ dest_addr,
+ orig_dest),
+ tmp_reg);
+ }
+ else if (bytes >= 4 && (align >= 4 || ! STRICT_ALIGNMENT))
+ {
+ move_bytes = 4;
+ tmp_reg = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (tmp_reg,
+ expand_block_move_mem (SImode,
+ src_addr,
+ orig_src)));
+ stores[num_reg++] = gen_movsi (expand_block_move_mem (SImode,
+ dest_addr,
+ orig_dest),
+ tmp_reg);
+ }
+ else if (bytes >= 2 && (align >= 2 || ! STRICT_ALIGNMENT))
+ {
+ move_bytes = 2;
+ tmp_reg = gen_reg_rtx (HImode);
+ emit_insn (gen_movhi (tmp_reg,
+ expand_block_move_mem (HImode,
+ src_addr,
+ orig_src)));
+ stores[num_reg++] = gen_movhi (expand_block_move_mem (HImode,
+ dest_addr,
+ orig_dest),
+ tmp_reg);
+ }
+ else
+ {
+ move_bytes = 1;
+ tmp_reg = gen_reg_rtx (QImode);
+ emit_insn (gen_movqi (tmp_reg,
+ expand_block_move_mem (QImode,
+ src_addr,
+ orig_src)));
+ stores[num_reg++] = gen_movqi (expand_block_move_mem (QImode,
+ dest_addr,
+ orig_dest),
+ tmp_reg);
+ }
+
+ /* Flush the batched stores once MAX_MOVE_REG loads are in
+ flight, so loads and stores interleave in groups. */
+ if (num_reg >= MAX_MOVE_REG)
+ {
+ for (i = 0; i < num_reg; i++)
+ emit_insn (stores[i]);
+ num_reg = 0;
+ }
+ }
+
+ /* Emit any stores still pending from the final partial batch. */
+ for (i = 0; i < num_reg; i++)
+ emit_insn (stores[i]);
+ }
+
+ return 1;
+}
+
+
+/* Return 1 if OP is a load multiple operation. It is known to be a
+ PARALLEL and the first section will be tested. */
+
+int
+load_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ /* Element 0 fixes the base destination register and the source
+ address for the whole sequence. */
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ /* Every later element must load SImode register dest_regno + i
+ from the address src_addr + 4*i. */
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Similar, but tests for store multiple. Here, the second vector element
+ is a CLOBBER. It will be tested later. */
+
+int
+store_multiple_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ /* Element 1 is the CLOBBER, so there is one fewer SET than there
+ are vector entries. */
+ int count = XVECLEN (op, 0) - 1;
+ unsigned int src_regno;
+ rtx dest_addr;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ /* Each remaining SET (note the i + 1 index, which skips the CLOBBER
+ at element 1) must store SImode register src_regno + i at
+ dest_addr + 4*i. */
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i + 1);
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
+ || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
+ || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != i * 4)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 for a parallel vrsave operation. */
+
+int
+vrsave_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno, src_regno;
+ int i;
+
+ /* The first element must be a SET whose source is an
+ UNSPEC_VOLATILE. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC_VOLATILE)
+ return 0;
+
+ /* NOTE(review): REGNO is applied here to the UNSPEC_VOLATILE
+ source, not to a REG -- verify this reads the intended field. */
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+
+ /* Either the destination or the source must be VRSAVE. */
+ if (dest_regno != VRSAVE_REGNO
+ && src_regno != VRSAVE_REGNO)
+ return 0;
+
+ /* The remaining elements need only be SETs or CLOBBERs; their
+ operands are not examined here. */
+ for (i = 1; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) != CLOBBER
+ && GET_CODE (elt) != SET)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 for a PARALLEL suitable for mtcrf. */
+
+int
+mtcrf_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ int i;
+ rtx src_reg;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count < 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != UNSPEC
+ || XVECLEN (SET_SRC (XVECEXP (op, 0, 0)), 0) != 2)
+ return 0;
+ src_reg = XVECEXP (SET_SRC (XVECEXP (op, 0, 0)), 0, 0);
+
+ /* The common source must be an SImode general-purpose register. */
+ if (GET_CODE (src_reg) != REG
+ || GET_MODE (src_reg) != SImode
+ || ! INT_REGNO_P (REGNO (src_reg)))
+ return 0;
+
+ /* Each element must set one CCmode CR field from an UNSPEC of
+ SRC_REG and the mask bit selecting that field. */
+ for (i = 0; i < count; i++)
+ {
+ rtx exp = XVECEXP (op, 0, i);
+ rtx unspec;
+ int maskval;
+
+ if (GET_CODE (exp) != SET
+ || GET_CODE (SET_DEST (exp)) != REG
+ || GET_MODE (SET_DEST (exp)) != CCmode
+ || ! CR_REGNO_P (REGNO (SET_DEST (exp))))
+ return 0;
+ unspec = SET_SRC (exp);
+ /* Bit position of this CR field within the mtcrf mask. */
+ maskval = 1 << (MAX_CR_REGNO - REGNO (SET_DEST (exp)));
+
+ /* NOTE(review): 20 is presumably the UNSPEC number used for the
+ mtcrf pattern in the machine description -- confirm against
+ rs6000.md. */
+ if (GET_CODE (unspec) != UNSPEC
+ || XINT (unspec, 1) != 20
+ || XVECLEN (unspec, 0) != 2
+ || XVECEXP (unspec, 0, 0) != src_reg
+ || GET_CODE (XVECEXP (unspec, 0, 1)) != CONST_INT
+ || INTVAL (XVECEXP (unspec, 0, 1)) != maskval)
+ return 0;
+ }
+ return 1;
+}
+
+/* Return 1 for a PARALLEL suitable for lmw. */
+
+int
+lmw_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ unsigned int dest_regno;
+ rtx src_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != REG
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != MEM)
+ return 0;
+
+ dest_regno = REGNO (SET_DEST (XVECEXP (op, 0, 0)));
+ src_addr = XEXP (SET_SRC (XVECEXP (op, 0, 0)), 0);
+
+ /* lmw loads every register from dest_regno through r31, so the
+ vector length must match exactly. */
+ if (dest_regno > 31
+ || count != 32 - (int) dest_regno)
+ return 0;
+
+ /* Decompose the starting address into base register + offset. */
+ if (LEGITIMATE_INDIRECT_ADDRESS_P (src_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (src_addr);
+ /* r0 as a base register reads as literal zero in an address. */
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (LEGITIMATE_OFFSET_ADDRESS_P (SImode, src_addr, 0))
+ {
+ offset = INTVAL (XEXP (src_addr, 1));
+ base_regno = REGNO (XEXP (src_addr, 0));
+ }
+ else
+ return 0;
+
+ /* Each element must load SImode register dest_regno + i from
+ base_regno + offset + 4*i. */
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_DEST (elt)) != REG
+ || GET_MODE (SET_DEST (elt)) != SImode
+ || REGNO (SET_DEST (elt)) != dest_regno + i
+ || GET_CODE (SET_SRC (elt)) != MEM
+ || GET_MODE (SET_SRC (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_SRC (elt), 0);
+ if (LEGITIMATE_INDIRECT_ADDRESS_P (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (LEGITIMATE_OFFSET_ADDRESS_P (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Return 1 for a PARALLEL suitable for stmw. */
+
+int
+stmw_operation (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ int count = XVECLEN (op, 0);
+ unsigned int src_regno;
+ rtx dest_addr;
+ unsigned int base_regno;
+ HOST_WIDE_INT offset;
+ int i;
+
+ /* Perform a quick check so we don't blow up below. */
+ if (count <= 1
+ || GET_CODE (XVECEXP (op, 0, 0)) != SET
+ || GET_CODE (SET_DEST (XVECEXP (op, 0, 0))) != MEM
+ || GET_CODE (SET_SRC (XVECEXP (op, 0, 0))) != REG)
+ return 0;
+
+ src_regno = REGNO (SET_SRC (XVECEXP (op, 0, 0)));
+ dest_addr = XEXP (SET_DEST (XVECEXP (op, 0, 0)), 0);
+
+ /* stmw stores every register from src_regno through r31, so the
+ vector length must match exactly. */
+ if (src_regno > 31
+ || count != 32 - (int) src_regno)
+ return 0;
+
+ /* Decompose the starting address into base register + offset. */
+ if (LEGITIMATE_INDIRECT_ADDRESS_P (dest_addr, 0))
+ {
+ offset = 0;
+ base_regno = REGNO (dest_addr);
+ /* r0 as a base register reads as literal zero in an address. */
+ if (base_regno == 0)
+ return 0;
+ }
+ else if (LEGITIMATE_OFFSET_ADDRESS_P (SImode, dest_addr, 0))
+ {
+ offset = INTVAL (XEXP (dest_addr, 1));
+ base_regno = REGNO (XEXP (dest_addr, 0));
+ }
+ else
+ return 0;
+
+ /* Each element must store SImode register src_regno + i at
+ base_regno + offset + 4*i. */
+ for (i = 0; i < count; i++)
+ {
+ rtx elt = XVECEXP (op, 0, i);
+ rtx newaddr;
+ rtx addr_reg;
+ HOST_WIDE_INT newoffset;
+
+ if (GET_CODE (elt) != SET
+ || GET_CODE (SET_SRC (elt)) != REG
+ || GET_MODE (SET_SRC (elt)) != SImode
+ || REGNO (SET_SRC (elt)) != src_regno + i
+ || GET_CODE (SET_DEST (elt)) != MEM
+ || GET_MODE (SET_DEST (elt)) != SImode)
+ return 0;
+ newaddr = XEXP (SET_DEST (elt), 0);
+ if (LEGITIMATE_INDIRECT_ADDRESS_P (newaddr, 0))
+ {
+ newoffset = 0;
+ addr_reg = newaddr;
+ }
+ else if (LEGITIMATE_OFFSET_ADDRESS_P (SImode, newaddr, 0))
+ {
+ addr_reg = XEXP (newaddr, 0);
+ newoffset = INTVAL (XEXP (newaddr, 1));
+ }
+ else
+ return 0;
+ if (REGNO (addr_reg) != base_regno
+ || newoffset != offset + 4 * i)
+ return 0;
+ }
+
+ return 1;
+}
+
+/* A validation routine: say whether CODE, a condition code, and MODE
+ match. The other alternatives either don't make sense or should
+ never be generated. */
+
+static void
+validate_condition_mode (code, mode)
+ enum rtx_code code;
+ enum machine_mode mode;
+{
+ /* CODE must be a comparison and MODE a condition-code mode. */
+ if (GET_RTX_CLASS (code) != '<'
+ || GET_MODE_CLASS (mode) != MODE_CC)
+ abort ();
+
+ /* These don't make sense: signed comparisons against the unsigned
+ CC mode. */
+ if ((code == GT || code == LT || code == GE || code == LE)
+ && mode == CCUNSmode)
+ abort ();
+
+ /* Conversely, an unsigned comparison requires the unsigned CC
+ mode. */
+ if ((code == GTU || code == LTU || code == GEU || code == LEU)
+ && mode != CCUNSmode)
+ abort ();
+
+ /* Ordered/unordered-aware codes only exist for the floating-point
+ CC mode. */
+ if (mode != CCFPmode
+ && (code == ORDERED || code == UNORDERED
+ || code == UNEQ || code == LTGT
+ || code == UNGT || code == UNLT
+ || code == UNGE || code == UNLE))
+ abort ();
+
+ /* These should never be generated except for
+ flag_unsafe_math_optimizations. */
+ if (mode == CCFPmode
+ && ! flag_unsafe_math_optimizations
+ && (code == LE || code == GE
+ || code == UNEQ || code == LTGT
+ || code == UNGT || code == UNLT))
+ abort ();
+
+ /* These are invalid; the information is not there. */
+ if (mode == CCEQmode
+ && code != EQ && code != NE)
+ abort ();
+}
+
+/* Return 1 if OP is a comparison operation that is valid for a branch insn.
+ We only check the opcode against the mode of the CC value here. */
+
+int
+branch_comparison_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code;
+ rtx cc_operand;
+ enum machine_mode cc_mode;
+
+ /* The operator itself must belong to the comparison class. */
+ code = GET_CODE (op);
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ /* Its first operand must carry a condition-code mode. */
+ cc_operand = XEXP (op, 0);
+ cc_mode = GET_MODE (cc_operand);
+ if (GET_MODE_CLASS (cc_mode) != MODE_CC)
+ return 0;
+
+ /* Sanity-check the code/mode pairing; this aborts on nonsense. */
+ validate_condition_mode (code, cc_mode);
+ return 1;
+}
+
+/* Return 1 if OP is a comparison operation that is valid for a branch
+ insn and which is true if the corresponding bit in the CC register
+ is set. */
+
+int
+branch_positive_comparison_operator (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (! branch_comparison_operator (op, mode))
+ return 0;
+
+ /* Only the codes that map to a single set CR bit qualify. */
+ switch (GET_CODE (op))
+ {
+ case EQ:
+ case LT:
+ case GT:
+ case LTU:
+ case GTU:
+ case UNORDERED:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if OP is a comparison operation that is valid for an scc insn.
+ We check the opcode against the mode of the CC value and disallow EQ or
+ NE comparisons for integers. */
+
+int
+scc_comparison_operator (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code;
+ enum machine_mode cc_mode;
+
+ /* Unless the caller passed VOIDmode, OP's own mode must match. */
+ if (mode != VOIDmode && GET_MODE (op) != mode)
+ return 0;
+
+ code = GET_CODE (op);
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ /* The first operand must carry a condition-code mode. */
+ cc_mode = GET_MODE (XEXP (op, 0));
+ if (GET_MODE_CLASS (cc_mode) != MODE_CC)
+ return 0;
+
+ validate_condition_mode (code, cc_mode);
+
+ /* Integer NE is not representable here; only the floating-point CC
+ mode admits it. */
+ return ! (code == NE && cc_mode != CCFPmode);
+}
+
+/* Return 1 if OP is a comparison operator usable in a trap insn;
+ when MODE is not VOIDmode, OP's own mode must also match it. */
+
+int
+trap_comparison_operator (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mode == VOIDmode || mode == GET_MODE (op))
+ return GET_RTX_CLASS (GET_CODE (op)) == '<';
+ return 0;
+}
+
+/* Return 1 if OP is one of the bitwise-logical operators AND, IOR
+ or XOR. */
+
+int
+boolean_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case AND:
+ case IOR:
+ case XOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if OP is IOR or XOR (the "or-like" boolean operators). */
+
+int
+boolean_or_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case IOR:
+ case XOR:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if OP is one of the signed or unsigned min/max operators. */
+
+int
+min_max_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case SMIN:
+ case SMAX:
+ case UMIN:
+ case UMAX:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+/* Return 1 if ANDOP is a mask that has no bits on that are not in the
+ mask required to convert the result of a rotate insn into a shift
+ left insn of SHIFTOP bits. Both are known to be CONST_INT. */
+
+int
+includes_lshift_p (shiftop, andop)
+ rtx shiftop;
+ rtx andop;
+{
+ /* The low-order SHIFTOP bits are exactly those a left shift would
+ clear; none of them may be set in ANDOP. */
+ unsigned HOST_WIDE_INT low_bits
+ = ~((~(unsigned HOST_WIDE_INT) 0) << INTVAL (shiftop));
+
+ return (INTVAL (andop) & low_bits) == 0;
+}
+
+/* Similar, but for right shift. */
+
+int
+includes_rshift_p (shiftop, andop)
+ rtx shiftop;
+ rtx andop;
+{
+ /* A right shift of SHIFTOP clears the high-order SHIFTOP bits;
+ ANDOP must not have any of those bits set. */
+ unsigned HOST_WIDE_INT high_bits
+ = ~((~(unsigned HOST_WIDE_INT) 0) >> INTVAL (shiftop));
+
+ return (INTVAL (andop) & high_bits) == 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
+ to perform a left shift. It must have exactly SHIFTOP least
+ significant 0's, then one or more 1's, then zero or more 0's. */
+
+int
+includes_rldic_lshift_p (shiftop, andop)
+ rtx shiftop;
+ rtx andop;
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ c = INTVAL (andop);
+ /* All-zero and all-one masks never qualify. */
+ if (c == 0 || c == ~0)
+ return 0;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+
+ /* Find the least significant one bit (c & -c isolates it). */
+ lsb = c & -c;
+
+ /* It must coincide with the LSB of the shift mask. */
+ if (-lsb != shift_mask)
+ return 0;
+
+ /* Invert to look for the next transition (if any). */
+ c = ~c;
+
+ /* Remove the low group of ones (originally low group of zeros). */
+ c &= -lsb;
+
+ /* Again find the lsb, and check we have all 1's above. */
+ lsb = c & -c;
+ return c == -lsb;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ /* On narrow hosts the 64-bit constant is split across two
+ HOST_WIDE_INT words; replay the same test piecewise. */
+ HOST_WIDE_INT low, high, lsb;
+ HOST_WIDE_INT shift_mask_low, shift_mask_high;
+
+ low = CONST_DOUBLE_LOW (andop);
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = CONST_DOUBLE_HIGH (andop);
+
+ /* Reject all-zero and all-one 64-bit masks. */
+ if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
+ || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ /* The mask lies entirely in the high word. */
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
+ return 0;
+
+ high = ~high;
+ high &= -lsb;
+
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if (-lsb != shift_mask_low)
+ return 0;
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ high = ~high;
+ low = ~low;
+ low &= -lsb;
+
+ if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
+ {
+ /* The transition out of the low group crosses into the high
+ word; the remaining 1's must fill it from its lsb up. */
+ lsb = high & -high;
+ return high == -lsb;
+ }
+
+ lsb = low & -low;
+ return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
+ to perform a left shift. It must have SHIFTOP or more least
+ significant 0's, with the remainder of the word 1's. */
+
+int
+includes_rldicr_lshift_p (shiftop, andop)
+ rtx shiftop;
+ rtx andop;
+{
+ if (GET_CODE (andop) == CONST_INT)
+ {
+ HOST_WIDE_INT c, lsb, shift_mask;
+
+ shift_mask = ~0;
+ shift_mask <<= INTVAL (shiftop);
+ c = INTVAL (andop);
+
+ /* Find the least significant one bit (c & -c isolates it). */
+ lsb = c & -c;
+
+ /* It must be covered by the shift mask.
+ This test also rejects c == 0. */
+ if ((lsb & shift_mask) == 0)
+ return 0;
+
+ /* Check we have all 1's above the transition, and reject all 1's. */
+ return c == -lsb && lsb != 1;
+ }
+ else if (GET_CODE (andop) == CONST_DOUBLE
+ && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
+ {
+ HOST_WIDE_INT low, lsb, shift_mask_low;
+
+ low = CONST_DOUBLE_LOW (andop);
+
+ if (HOST_BITS_PER_WIDE_INT < 64)
+ {
+ /* On narrow hosts the constant is split across two words;
+ handle the case where the 0->1 transition lies in the high
+ word. */
+ HOST_WIDE_INT high, shift_mask_high;
+
+ high = CONST_DOUBLE_HIGH (andop);
+
+ if (low == 0)
+ {
+ shift_mask_high = ~0;
+ if (INTVAL (shiftop) > 32)
+ shift_mask_high <<= INTVAL (shiftop) - 32;
+
+ lsb = high & -high;
+
+ if ((lsb & shift_mask_high) == 0)
+ return 0;
+
+ return high == -lsb;
+ }
+ /* Otherwise the whole high word must be 1's. */
+ if (high != ~0)
+ return 0;
+ }
+
+ shift_mask_low = ~0;
+ shift_mask_low <<= INTVAL (shiftop);
+
+ lsb = low & -low;
+
+ if ((lsb & shift_mask_low) == 0)
+ return 0;
+
+ return low == -lsb && lsb != 1;
+ }
+ else
+ return 0;
+}
+
+/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
+ for lfq and stfq insns.
+
+ Note reg1 and reg2 *must* be hard registers. To be sure we will
+ abort if we are passed pseudo registers. */
+
+int
+registers_ok_for_quad_peep (reg1, reg2)
+ rtx reg1, reg2;
+{
+ /* Anything that is not a bare REG (e.g. a SUBREG) never
+ qualifies. */
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ /* The two registers must be consecutive. */
+ return REGNO (reg1) + 1 == REGNO (reg2);
+}
+
+/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
+ addr1 and addr2 must be in consecutive memory locations
+ (addr2 == addr1 + 8). */
+
+int
+addrs_ok_for_quad_peep (addr1, addr2)
+ rtx addr1;
+ rtx addr2;
+{
+ unsigned int base1;
+ int off1;
+
+ /* Decompose the first address into base register + constant
+ offset. A bare register means offset zero; anything other than
+ REG or REG+CONST_INT is rejected. */
+ if (GET_CODE (addr1) == REG)
+ {
+ base1 = REGNO (addr1);
+ off1 = 0;
+ }
+ else if (GET_CODE (addr1) == PLUS
+ && GET_CODE (XEXP (addr1, 0)) == REG
+ && GET_CODE (XEXP (addr1, 1)) == CONST_INT)
+ {
+ base1 = REGNO (XEXP (addr1, 0));
+ off1 = INTVAL (XEXP (addr1, 1));
+ }
+ else
+ return 0;
+
+ /* The second address must be (plus (reg) (const_int)) with the same
+ base register and an offset exactly 8 larger than the first. */
+ return (GET_CODE (addr2) == PLUS
+ && GET_CODE (XEXP (addr2, 0)) == REG
+ && GET_CODE (XEXP (addr2, 1)) == CONST_INT
+ && REGNO (XEXP (addr2, 0)) == base1
+ && INTVAL (XEXP (addr2, 1)) == off1 + 8);
+}
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+enum reg_class
+secondary_reload_class (class, mode, in)
+ enum reg_class class;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx in;
+{
+ int regno;
+
+ if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN && flag_pic))
+ {
+ /* We cannot copy a symbolic operand directly into anything
+ other than BASE_REGS for TARGET_ELF. So indicate that a
+ register from BASE_REGS is needed as an intermediate
+ register.
+
+ On Darwin, pic addresses require a load from memory, which
+ needs a base register. */
+ if (class != BASE_REGS
+ && (GET_CODE (in) == SYMBOL_REF
+ || GET_CODE (in) == HIGH
+ || GET_CODE (in) == LABEL_REF
+ || GET_CODE (in) == CONST))
+ return BASE_REGS;
+ }
+
+ /* Reduce IN to a hard register number, or -1 when it is not (or is
+ not known to be) a hard register. */
+ if (GET_CODE (in) == REG)
+ {
+ regno = REGNO (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ }
+ else if (GET_CODE (in) == SUBREG)
+ {
+ regno = true_regnum (in);
+ if (regno >= FIRST_PSEUDO_REGISTER)
+ regno = -1;
+ }
+ else
+ regno = -1;
+
+ /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
+ into anything. */
+ if (class == GENERAL_REGS || class == BASE_REGS
+ || (regno >= 0 && INT_REGNO_P (regno)))
+ return NO_REGS;
+
+ /* Constants, memory, and FP registers can go into FP registers. */
+ if ((regno == -1 || FP_REGNO_P (regno))
+ && (class == FLOAT_REGS || class == NON_SPECIAL_REGS))
+ return NO_REGS;
+
+ /* Memory, and AltiVec registers can go into AltiVec registers. */
+ if ((regno == -1 || ALTIVEC_REGNO_P (regno))
+ && class == ALTIVEC_REGS)
+ return NO_REGS;
+
+ /* We can copy among the CR registers. */
+ if ((class == CR_REGS || class == CR0_REGS)
+ && regno >= 0 && CR_REGNO_P (regno))
+ return NO_REGS;
+
+ /* Otherwise, we need GENERAL_REGS. */
+ return GENERAL_REGS;
+}
+
+/* Given a comparison operation, return the bit number in CCR to test. We
+ know this is a valid comparison.
+
+ SCC_P is 1 if this is for an scc. That means that %D will have been
+ used instead of %C, so the bits will be in different places.
+
+ Return -1 if OP isn't a valid comparison for some reason. */
+
+int
+ccr_bit (op, scc_p)
+ rtx op;
+ int scc_p;
+{
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode cc_mode;
+ int cc_regnum;
+ int base_bit;
+ rtx reg;
+
+ if (GET_RTX_CLASS (code) != '<')
+ return -1;
+
+ reg = XEXP (op, 0);
+
+ /* The comparison must be against a condition register. */
+ if (GET_CODE (reg) != REG
+ || ! CR_REGNO_P (REGNO (reg)))
+ abort ();
+
+ cc_mode = GET_MODE (reg);
+ cc_regnum = REGNO (reg);
+ /* Each CR field occupies four bits; the switch below maps LT to
+ base_bit, GT to +1, EQ to +2 and ordered/unordered to +3. */
+ base_bit = 4 * (cc_regnum - CR0_REGNO);
+
+ validate_condition_mode (code, cc_mode);
+
+ switch (code)
+ {
+ case NE:
+ return scc_p ? base_bit + 3 : base_bit + 2;
+ case EQ:
+ return base_bit + 2;
+ case GT: case GTU: case UNLE:
+ return base_bit + 1;
+ case LT: case LTU: case UNGE:
+ return base_bit;
+ case ORDERED: case UNORDERED:
+ return base_bit + 3;
+
+ case GE: case GEU:
+ /* If scc, we will have done a cror to put the bit in the
+ unordered position. So test that bit. For integer, this is ! LT
+ unless this is an scc insn. */
+ return scc_p ? base_bit + 3 : base_bit;
+
+ case LE: case LEU:
+ return scc_p ? base_bit + 3 : base_bit + 1;
+
+ default:
+ abort ();
+ }
+}
+
+/* Return the GOT register. */
+
+struct rtx_def *
+rs6000_got_register (value)
+ rtx value ATTRIBUTE_UNUSED;
+{
+ /* The second flow pass currently (June 1999) can't update
+ regs_ever_live without disturbing other parts of the compiler, so
+ update it here to make the prolog/epilogue code happy. */
+ if (no_new_pseudos && ! regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
+ regs_ever_live[PIC_OFFSET_TABLE_REGNUM] = 1;
+
+ /* Record that this function references the PIC offset table. */
+ current_function_uses_pic_offset_table = 1;
+
+ return pic_offset_table_rtx;
+}
+
+/* Functions to init, mark and free struct machine_function.
+ These will be called, via pointer variables,
+ from push_function_context and pop_function_context. */
+
+static void
+rs6000_init_machine_status (p)
+ struct function *p;
+{
+ /* Allocate the per-function machine-specific data, zero-filled via
+ xcalloc. */
+ p->machine = (machine_function *) xcalloc (1, sizeof (machine_function));
+}
+
+/* Release the per-function machine-specific data allocated by
+ rs6000_init_machine_status. */
+
+static void
+rs6000_free_machine_status (p)
+ struct function *p;
+{
+ /* free is defined to be a no-op on a null pointer, so the previous
+ NULL guard was redundant. Clear the field afterwards so a stale
+ pointer can never be reused. */
+ free (p->machine);
+ p->machine = NULL;
+}
+
+
+/* Print an operand. Recognize special options, documented below. */
+
+#if TARGET_ELF
+/* Relocation suffix and base register used when printing small-data
+ operand references; on ELF they depend on the -msdata model in use,
+ elsewhere they are fixed. */
+#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
+#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
+#else
+#define SMALL_DATA_RELOC "sda21"
+#define SMALL_DATA_REG 0
+#endif
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ int i;
+ HOST_WIDE_INT val;
+
+ /* These macros test for integers and extract the low-order bits. */
+#define INT_P(X) \
+((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE) \
+ && GET_MODE (X) == VOIDmode)
+
+#define INT_LOWPART(X) \
+ (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
+
+ switch (code)
+ {
+ case '.':
+ /* Write out an instruction after the call which may be replaced
+ with glue code by the loader. This depends on the AIX version. */
+ asm_fprintf (file, RS6000_CALL_GLUE);
+ return;
+
+ /* %a is output_address. */
+
+ case 'A':
+ /* If X is a constant integer whose low-order 5 bits are zero,
+ write 'l'. Otherwise, write 'r'. This is a kludge to fix a bug
+ in the AIX assembler where "sri" with a zero shift count
+ writes a trash instruction. */
+ if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
+ putc ('l', file);
+ else
+ putc ('r', file);
+ return;
+
+ case 'b':
+ /* If constant, low-order 16 bits of constant, unsigned.
+ Otherwise, write normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'B':
+ /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
+ for 64-bit mask direction. */
+ putc (((INT_LOWPART(x) & 1) == 0 ? 'r' : 'l'), file);
+ return;
+
+ /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
+ output_operand. */
+
+ case 'D':
+ /* There used to be a comment for 'C' reading "This is an
+ optional cror needed for certain floating-point
+ comparisons. Otherwise write nothing." */
+
+ /* Similar, except that this is for an scc, so we must be able to
+ encode the test in a single bit that is one. We do the above
+ for any LE, GE, GEU, or LEU and invert the bit for NE. */
+ if (GET_CODE (x) == LE || GET_CODE (x) == GE
+ || GET_CODE (x) == LEU || GET_CODE (x) == GEU)
+ {
+ int base_bit = 4 * (REGNO (XEXP (x, 0)) - CR0_REGNO);
+
+ fprintf (file, "cror %d,%d,%d\n\t", base_bit + 3,
+ base_bit + 2,
+ base_bit + (GET_CODE (x) == GE || GET_CODE (x) == GEU));
+ }
+
+ else if (GET_CODE (x) == NE)
+ {
+ int base_bit = 4 * (REGNO (XEXP (x, 0)) - CR0_REGNO);
+
+ fprintf (file, "crnor %d,%d,%d\n\t", base_bit + 3,
+ base_bit + 2, base_bit + 2);
+ }
+ return;
+
+ case 'E':
+ /* X is a CR register. Print the number of the EQ bit of the CR */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%E value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
+ return;
+
+ case 'f':
+ /* X is a CR register. Print the shift count needed to move it
+ to the high-order four bits. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%f value");
+ else
+ fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'F':
+ /* Similar, but print the count for the rotate in the opposite
+ direction. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%F value");
+ else
+ fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 'G':
+ /* X is a constant integer. If it is negative, print "m",
+ otherwise print "z". This is to make a aze or ame insn. */
+ if (GET_CODE (x) != CONST_INT)
+ output_operand_lossage ("invalid %%G value");
+ else if (INTVAL (x) >= 0)
+ putc ('z', file);
+ else
+ putc ('m', file);
+ return;
+
+ case 'h':
+ /* If constant, output low-order five bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'H':
+ /* If constant, output low-order six bits. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'I':
+ /* Print `i' if this is a constant, else nothing. */
+ if (INT_P (x))
+ putc ('i', file);
+ return;
+
+ case 'j':
+ /* Write the bit number in CCR for jump. */
+ i = ccr_bit (x, 0);
+ if (i == -1)
+ output_operand_lossage ("invalid %%j code");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'J':
+ /* Similar, but add one for shift count in rlinm for scc and pass
+ scc flag to `ccr_bit'. */
+ i = ccr_bit (x, 1);
+ if (i == -1)
+ output_operand_lossage ("invalid %%J code");
+ else
+ /* If we want bit 31, write a shift count of zero, not 32. */
+ fprintf (file, "%d", i == 31 ? 0 : i + 1);
+ return;
+
+ case 'k':
+ /* X must be a constant. Write the 1's complement of the
+ constant. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%k value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
+ return;
+
+ case 'K':
+ /* X must be a symbolic constant on ELF. Write an
+ expression suitable for an 'addi' that adds in the low 16
+ bits of the MEM. */
+ if (GET_CODE (x) != CONST)
+ {
+ print_operand_address (file, x);
+ fputs ("@l", file);
+ }
+ else
+ {
+ if (GET_CODE (XEXP (x, 0)) != PLUS
+ || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
+ || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
+ output_operand_lossage ("invalid %%K value");
+ print_operand_address (file, XEXP (XEXP (x, 0), 0));
+ fputs ("@l", file);
+ print_operand (file, XEXP (XEXP (x, 0), 1), 0);
+ }
+ return;
+
+ /* %l is output_asm_label. */
+
+ case 'L':
+ /* Write second word of DImode or DFmode reference. Works on register
+ or non-indexed memory only. */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x) + 1]);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* Handle possible auto-increment. Since it is pre-increment and
+ we have already done it, we can just use an offset of word. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0),
+ UNITS_PER_WORD));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode,
+ UNITS_PER_WORD),
+ 0));
+
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'm':
+ /* MB value for a mask operand. */
+ if (! mask_operand (x, VOIDmode))
+ output_operand_lossage ("invalid %%m value");
+
+ val = INT_LOWPART (x);
+
+ /* If the high bit is set and the low bit is not, the value is zero.
+ If the high bit is zero, the value is the first 1 bit we find from
+ the left. */
+ if ((val & 0x80000000) && ((val & 1) == 0))
+ {
+ putc ('0', file);
+ return;
+ }
+ else if ((val & 0x80000000) == 0)
+ {
+ for (i = 1; i < 32; i++)
+ if ((val <<= 1) & 0x80000000)
+ break;
+ fprintf (file, "%d", i);
+ return;
+ }
+
+ /* Otherwise, look for the first 0 bit from the right. The result is its
+ number plus 1. We know the low-order bit is one. */
+ for (i = 0; i < 32; i++)
+ if (((val >>= 1) & 1) == 0)
+ break;
+
+ /* If we ended in ...01, i would be 0. The correct value is 31, so
+ we want 31 - i. */
+ fprintf (file, "%d", 31 - i);
+ return;
+
+ case 'M':
+ /* ME value for a mask operand. */
+ if (! mask_operand (x, VOIDmode))
+ output_operand_lossage ("invalid %%M value");
+
+ val = INT_LOWPART (x);
+
+ /* If the low bit is set and the high bit is not, the value is 31.
+ If the low bit is zero, the value is the first 1 bit we find from
+ the right. */
+ if ((val & 1) && ((val & 0x80000000) == 0))
+ {
+ fputs ("31", file);
+ return;
+ }
+ else if ((val & 1) == 0)
+ {
+ for (i = 0; i < 32; i++)
+ if ((val >>= 1) & 1)
+ break;
+
+ /* If we had ....10, i would be 0. The result should be
+ 30, so we need 30 - i. */
+ fprintf (file, "%d", 30 - i);
+ return;
+ }
+
+ /* Otherwise, look for the first 0 bit from the left. The result is its
+ number minus 1. We know the high-order bit is one. */
+ for (i = 0; i < 32; i++)
+ if (((val <<= 1) & 0x80000000) == 0)
+ break;
+
+ fprintf (file, "%d", i);
+ return;
+
+ /* %n outputs the negative of its operand. */
+
+ case 'N':
+ /* Write the number of elements in the vector times 4. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%N value");
+ else
+ fprintf (file, "%d", XVECLEN (x, 0) * 4);
+ return;
+
+ case 'O':
+ /* Similar, but subtract 1 first. */
+ if (GET_CODE (x) != PARALLEL)
+ output_operand_lossage ("invalid %%O value");
+ else
+ fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
+ return;
+
+ case 'p':
+ /* X is a CONST_INT that is a power of two. Output the logarithm. */
+ if (! INT_P (x)
+ || INT_LOWPART (x) < 0
+ || (i = exact_log2 (INT_LOWPART (x))) < 0)
+ output_operand_lossage ("invalid %%p value");
+ else
+ fprintf (file, "%d", i);
+ return;
+
+ case 'P':
+ /* The operand must be an indirect memory reference. The result
+ is the register number. */
+ if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
+ || REGNO (XEXP (x, 0)) >= 32)
+ output_operand_lossage ("invalid %%P value");
+ else
+ fprintf (file, "%d", REGNO (XEXP (x, 0)));
+ return;
+
+ case 'q':
+ /* This outputs the logical code corresponding to a boolean
+ expression. The expression may have one or both operands
+ negated (if one, only the first one). For condition register
+ logical operations, it will also treat the negated
+ CR codes as NOTs, but not handle NOTs of them. */
+ {
+ const char *const *t = 0;
+ const char *s;
+ enum rtx_code code = GET_CODE (x);
+ static const char * const tbl[3][3] = {
+ { "and", "andc", "nor" },
+ { "or", "orc", "nand" },
+ { "xor", "eqv", "xor" } };
+
+ if (code == AND)
+ t = tbl[0];
+ else if (code == IOR)
+ t = tbl[1];
+ else if (code == XOR)
+ t = tbl[2];
+ else
+ output_operand_lossage ("invalid %%q value");
+
+ if (GET_CODE (XEXP (x, 0)) != NOT)
+ s = t[0];
+ else
+ {
+ if (GET_CODE (XEXP (x, 1)) == NOT)
+ s = t[2];
+ else
+ s = t[1];
+ }
+
+ fputs (s, file);
+ }
+ return;
+
+ case 'R':
+ /* X is a CR register. Print the mask for `mtcrf'. */
+ if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
+ output_operand_lossage ("invalid %%R value");
+ else
+ fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
+ return;
+
+ case 's':
+ /* Low 5 bits of 32 - value */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%s value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
+ return;
+
+ case 'S':
+ /* PowerPC64 mask position. All 0's and all 1's are excluded.
+ CONST_INT 32-bit mask is considered sign-extended so any
+ transition must occur within the CONST_INT, not on the boundary. */
+ if (! mask64_operand (x, VOIDmode))
+ output_operand_lossage ("invalid %%S value");
+
+ val = INT_LOWPART (x);
+
+ if (val & 1) /* Clear Left */
+ {
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
+ if (!((val >>= 1) & 1))
+ break;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (x) == CONST_DOUBLE && i == 32)
+ {
+ val = CONST_DOUBLE_HIGH (x);
+
+ if (val == 0)
+ --i;
+ else
+ for (i = 32; i < 64; i++)
+ if (!((val >>= 1) & 1))
+ break;
+ }
+#endif
+ /* i = index of last set bit from right
+ mask begins at 63 - i from left */
+ if (i > 63)
+ output_operand_lossage ("%%S computed all 1's mask");
+
+ fprintf (file, "%d", 63 - i);
+ return;
+ }
+ else /* Clear Right */
+ {
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
+ if ((val >>= 1) & 1)
+ break;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (x) == CONST_DOUBLE && i == 32)
+ {
+ val = CONST_DOUBLE_HIGH (x);
+
+ if (val == (HOST_WIDE_INT) -1)
+ --i;
+ else
+ for (i = 32; i < 64; i++)
+ if ((val >>= 1) & 1)
+ break;
+ }
+#endif
+ /* i = index of last clear bit from right
+ mask ends at 62 - i from left */
+ if (i > 62)
+ output_operand_lossage ("%%S computed all 0's mask");
+
+ fprintf (file, "%d", 62 - i);
+ return;
+ }
+
+ case 'T':
+ /* Print the symbolic name of a branch target register. */
+ if (GET_CODE (x) != REG || (REGNO (x) != LINK_REGISTER_REGNUM
+ && REGNO (x) != COUNT_REGISTER_REGNUM))
+ output_operand_lossage ("invalid %%T value");
+ else if (REGNO (x) == LINK_REGISTER_REGNUM)
+ fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
+ else
+ fputs ("ctr", file);
+ return;
+
+ case 'u':
+ /* High-order 16 bits of constant for use in unsigned operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%u value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'v':
+ /* High-order 16 bits of constant for use in signed operand. */
+ if (! INT_P (x))
+ output_operand_lossage ("invalid %%v value");
+ else
+ fprintf (file, HOST_WIDE_INT_PRINT_HEX,
+ (INT_LOWPART (x) >> 16) & 0xffff);
+ return;
+
+ case 'U':
+ /* Print `u' if this has an auto-increment or auto-decrement. */
+ if (GET_CODE (x) == MEM
+ && (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC))
+ putc ('u', file);
+ return;
+
+ case 'V':
+ /* Print the trap code for this operand. */
+ switch (GET_CODE (x))
+ {
+ case EQ:
+ fputs ("eq", file); /* 4 */
+ break;
+ case NE:
+ fputs ("ne", file); /* 24 */
+ break;
+ case LT:
+ fputs ("lt", file); /* 16 */
+ break;
+ case LE:
+ fputs ("le", file); /* 20 */
+ break;
+ case GT:
+ fputs ("gt", file); /* 8 */
+ break;
+ case GE:
+ fputs ("ge", file); /* 12 */
+ break;
+ case LTU:
+ fputs ("llt", file); /* 2 */
+ break;
+ case LEU:
+ fputs ("lle", file); /* 6 */
+ break;
+ case GTU:
+ fputs ("lgt", file); /* 1 */
+ break;
+ case GEU:
+ fputs ("lge", file); /* 5 */
+ break;
+ default:
+ abort ();
+ }
+ break;
+
+ case 'w':
+ /* If constant, low-order 16 bits of constant, signed. Otherwise, write
+ normally. */
+ if (INT_P (x))
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
+ else
+ print_operand (file, x, 0);
+ return;
+
+ case 'W':
+ /* MB value for a PowerPC64 rldic operand. */
+ val = (GET_CODE (x) == CONST_INT
+ ? INTVAL (x) : CONST_DOUBLE_HIGH (x));
+
+ if (val < 0)
+ i = -1;
+ else
+ for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
+ if ((val <<= 1) < 0)
+ break;
+
+#if HOST_BITS_PER_WIDE_INT == 32
+ if (GET_CODE (x) == CONST_INT && i >= 0)
+ i += 32; /* zero-extend high-part was all 0's */
+ else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
+ {
+ val = CONST_DOUBLE_LOW (x);
+
+ if (val == 0)
+ abort ();
+ else if (val < 0)
+ --i;
+ else
+ for ( ; i < 64; i++)
+ if ((val <<= 1) < 0)
+ break;
+ }
+#endif
+
+ fprintf (file, "%d", i + 1);
+ return;
+
+ case 'X':
+ if (GET_CODE (x) == MEM
+ && LEGITIMATE_INDEXED_ADDRESS_P (XEXP (x, 0), 0))
+ putc ('x', file);
+ return;
+
+ case 'Y':
+ /* Like 'L', for third word of TImode */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x) + 2]);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ case 'z':
+ /* X is a SYMBOL_REF. Write out the name preceded by a
+ period and without any trailing data in brackets. Used for function
+ names. If we are configured for System V (or the embedded ABI) on
+ the PowerPC, do not emit the period, since those systems do not use
+ TOCs and the like. */
+ if (GET_CODE (x) != SYMBOL_REF)
+ abort ();
+
+ if (XSTR (x, 0)[0] != '.')
+ {
+ switch (DEFAULT_ABI)
+ {
+ default:
+ abort ();
+
+ case ABI_AIX:
+ putc ('.', file);
+ break;
+
+ case ABI_V4:
+ case ABI_AIX_NODESC:
+ case ABI_DARWIN:
+ break;
+ }
+ }
+#if TARGET_AIX
+ RS6000_OUTPUT_BASENAME (file, XSTR (x, 0));
+#else
+ assemble_name (file, XSTR (x, 0));
+#endif
+ return;
+
+ case 'Z':
+ /* Like 'L', for last word of TImode. */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x) + 3]);
+ else if (GET_CODE (x) == MEM)
+ {
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC
+ || GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
+ else
+ output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ }
+ return;
+
+ /* Print AltiVec memory operand. */
+ case 'y':
+ {
+ rtx tmp;
+
+ if (GET_CODE (x) != MEM)
+ abort ();
+
+ tmp = XEXP (x, 0);
+
+ if (GET_CODE (tmp) == REG)
+ fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
+ else if (GET_CODE (tmp) == PLUS && GET_CODE (XEXP (tmp, 1)) == REG)
+ {
+ if (REGNO (XEXP (tmp, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
+ reg_names[ REGNO (XEXP (tmp, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
+ reg_names[ REGNO (XEXP (tmp, 1)) ]);
+ }
+ else
+ abort ();
+ break;
+ }
+
+ case 0:
+ if (GET_CODE (x) == REG)
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ else if (GET_CODE (x) == MEM)
+ {
+ /* We need to handle PRE_INC and PRE_DEC here, since we need to
+ know the width from the mode. */
+ if (GET_CODE (XEXP (x, 0)) == PRE_INC)
+ fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
+ fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
+ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
+ else
+ output_address (XEXP (x, 0));
+ }
+ else
+ output_addr_const (file, x);
+ return;
+
+ default:
+ output_operand_lossage ("invalid %%xn code");
+ }
+}
+
+/* Print the address of an operand X (a REG, symbolic ref, reg+reg,
+ reg+const, LO_SUM, or TOC constant-pool address) to FILE in
+ assembler syntax. Aborts on any other address form. */
+
+void
+print_operand_address (file, x)
+ FILE *file;
+ rtx x;
+{
+ /* A bare register is indirect with a zero displacement. */
+ if (GET_CODE (x) == REG)
+ fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
+ else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
+ || GET_CODE (x) == LABEL_REF)
+ {
+ /* Symbolic address: print the constant, then for symbols in the
+ small data area append the relocation and base register. A
+ bare symbolic address is never valid when a TOC is in use. */
+ output_addr_const (file, x);
+ if (small_data_operand (x, GET_MODE (x)))
+ fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
+ reg_names[SMALL_DATA_REG]);
+ else if (TARGET_TOC)
+ abort ();
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
+ {
+ /* Indexed (reg+reg) form. If the first register is r0, print
+ the operands swapped so r0 never comes first -- NOTE(review):
+ presumably because r0 in the base slot of an indexed access
+ reads as literal zero; confirm against the ISA. */
+ if (REGNO (XEXP (x, 0)) == 0)
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
+ reg_names[ REGNO (XEXP (x, 0)) ]);
+ else
+ fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
+ reg_names[ REGNO (XEXP (x, 1)) ]);
+ }
+ else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
+ {
+ /* Displacement form: "d(reg)". */
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (XEXP (x, 1)));
+ fprintf (file, "(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#if TARGET_ELF
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ /* ELF low part of a HIGH/LO_SUM pair: "sym@l(reg)". */
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+#if TARGET_MACHO
+ else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
+ && CONSTANT_P (XEXP (x, 1)))
+ {
+ /* Darwin spelling of the same low-part reference. */
+ fprintf (file, "lo16(");
+ output_addr_const (file, XEXP (x, 1));
+ fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
+ }
+#endif
+ else if (LEGITIMATE_CONSTANT_POOL_ADDRESS_P (x))
+ {
+ if (TARGET_AIX && (!TARGET_ELF || !TARGET_MINIMAL_TOC))
+ {
+ rtx contains_minus = XEXP (x, 1);
+ rtx minus, symref;
+ const char *name;
+
+ /* Find the (minus (sym) (toc)) buried in X, and temporarily
+ turn it into (sym) for output_addr_const. */
+ while (GET_CODE (XEXP (contains_minus, 0)) != MINUS)
+ contains_minus = XEXP (contains_minus, 0);
+
+ minus = XEXP (contains_minus, 0);
+ symref = XEXP (minus, 0);
+ XEXP (contains_minus, 0) = symref;
+ if (TARGET_ELF)
+ {
+ char *newname;
+
+ /* For ELF, splice "@toc" onto the symbol name for the
+ duration of the output call; restored below. */
+ name = XSTR (symref, 0);
+ newname = alloca (strlen (name) + sizeof ("@toc"));
+ strcpy (newname, name);
+ strcat (newname, "@toc");
+ XSTR (symref, 0) = newname;
+ }
+ output_addr_const (file, XEXP (x, 1));
+ if (TARGET_ELF)
+ XSTR (symref, 0) = name;
+ /* Undo the temporary rewrite of X. */
+ XEXP (contains_minus, 0) = minus;
+ }
+ else
+ output_addr_const (file, XEXP (x, 1));
+
+ fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
+ }
+ else
+ abort ();
+}
+
+/* Target hook for assembling integer objects. The powerpc version has
+ to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
+ is defined. It also needs to handle DI-mode objects on 64-bit
+ targets. Returns true when the value was emitted here, false to
+ fall back to the default handler. */
+
+static bool
+rs6000_assemble_integer (x, size, aligned_p)
+ rtx x;
+ unsigned int size;
+ int aligned_p;
+{
+#ifdef RELOCATABLE_NEEDS_FIXUP
+ /* Special handling for SI values. */
+ if (size == 4 && aligned_p)
+ {
+ extern int in_toc_section PARAMS ((void));
+ /* NOTE(review): recursion guard -- ensures the fixup path below
+ is not re-applied while it is itself emitting output. */
+ static int recurse = 0;
+
+ /* For -mrelocatable, we mark all addresses that need to be fixed up
+ in the .fixup section. */
+ if (TARGET_RELOCATABLE
+ && !in_toc_section ()
+ && !in_text_section ()
+ && !recurse
+ && GET_CODE (x) != CONST_INT
+ && GET_CODE (x) != CONST_DOUBLE
+ && CONSTANT_P (x))
+ {
+ char buf[256];
+
+ /* Emit "LCPn: .long (expr)@fixup" in place, and record the
+ address of LCPn in the .fixup section. */
+ recurse = 1;
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
+ fixuplabelno++;
+ ASM_OUTPUT_LABEL (asm_out_file, buf);
+ fprintf (asm_out_file, "\t.long\t(");
+ output_addr_const (asm_out_file, x);
+ fprintf (asm_out_file, ")@fixup\n");
+ fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
+ ASM_OUTPUT_ALIGN (asm_out_file, 2);
+ fprintf (asm_out_file, "\t.long\t");
+ assemble_name (asm_out_file, buf);
+ fprintf (asm_out_file, "\n\t.previous\n");
+ recurse = 0;
+ return true;
+ }
+ /* Remove initial .'s to turn a -mcall-aixdesc function
+ address into the address of the descriptor, not the function
+ itself. */
+ else if (GET_CODE (x) == SYMBOL_REF
+ && XSTR (x, 0)[0] == '.'
+ && DEFAULT_ABI == ABI_AIX)
+ {
+ const char *name = XSTR (x, 0);
+ while (*name == '.')
+ name++;
+
+ fprintf (asm_out_file, "\t.long\t%s\n", name);
+ return true;
+ }
+ }
+#endif /* RELOCATABLE_NEEDS_FIXUP */
+ /* Everything else gets generic handling. */
+ return default_assemble_integer (x, size, aligned_p);
+}
+
+/* Return the reverse of comparison CODE performed in MODE.
+ Floating-point compares (CCFPmode) need care: reversing an ordered
+ test yields the corresponding unordered test and vice versa, so
+ they go through the maybe-unordered variant. */
+
+enum rtx_code
+rs6000_reverse_condition (mode, code)
+ enum machine_mode mode;
+ enum rtx_code code;
+{
+ return (mode == CCFPmode
+ ? reverse_condition_maybe_unordered (code)
+ : reverse_condition (code));
+}
+
+/* Generate a compare for CODE. Return a brand-new rtx that
+ represents the result of the compare. The operands compared are
+ the global rs6000_compare_op0/rs6000_compare_op1 set up by the
+ expanders; the returned rtx is (CODE cc-reg 0). */
+
+static rtx
+rs6000_generate_compare (code)
+ enum rtx_code code;
+{
+ enum machine_mode comp_mode;
+ rtx compare_result;
+
+ /* Pick the condition-code mode: FP compares use CCFPmode, unsigned
+ integer compares CCUNSmode, everything else plain CCmode. */
+ if (rs6000_compare_fp_p)
+ comp_mode = CCFPmode;
+ else if (code == GTU || code == LTU
+ || code == GEU || code == LEU)
+ comp_mode = CCUNSmode;
+ else
+ comp_mode = CCmode;
+
+ /* First, the compare. */
+ compare_result = gen_reg_rtx (comp_mode);
+ emit_insn (gen_rtx_SET (VOIDmode, compare_result,
+ gen_rtx_COMPARE (comp_mode,
+ rs6000_compare_op0,
+ rs6000_compare_op1)));
+
+ /* Some kinds of FP comparisons need an OR operation;
+ except for flag_unsafe_math_optimizations we don't bother. */
+ if (rs6000_compare_fp_p
+ && ! flag_unsafe_math_optimizations
+ && (code == LE || code == GE
+ || code == UNEQ || code == LTGT
+ || code == UNGT || code == UNLT))
+ {
+ /* Split the compound test into its two primitive conditions,
+ OR them into a CCEQmode result, and test that with EQ. */
+ enum rtx_code or1, or2;
+ rtx or1_rtx, or2_rtx, compare2_rtx;
+ rtx or_result = gen_reg_rtx (CCEQmode);
+
+ switch (code)
+ {
+ case LE: or1 = LT; or2 = EQ; break;
+ case GE: or1 = GT; or2 = EQ; break;
+ case UNEQ: or1 = UNORDERED; or2 = EQ; break;
+ case LTGT: or1 = LT; or2 = GT; break;
+ case UNGT: or1 = UNORDERED; or2 = GT; break;
+ case UNLT: or1 = UNORDERED; or2 = LT; break;
+ default: abort ();
+ }
+ validate_condition_mode (or1, comp_mode);
+ validate_condition_mode (or2, comp_mode);
+ or1_rtx = gen_rtx (or1, SImode, compare_result, const0_rtx);
+ or2_rtx = gen_rtx (or2, SImode, compare_result, const0_rtx);
+ compare2_rtx = gen_rtx_COMPARE (CCEQmode,
+ gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
+ const_true_rtx);
+ emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
+
+ /* The caller now tests the combined result for equality. */
+ compare_result = or_result;
+ code = EQ;
+ }
+
+ /* Sanity-check that CODE is usable with the chosen CC mode. */
+ validate_condition_mode (code, GET_MODE (compare_result));
+
+ return gen_rtx (code, VOIDmode, compare_result, const0_rtx);
+}
+
+
+/* Emit the RTL for an sCOND pattern: store into RESULT the truth
+ value of comparison CODE applied to the operands previously set up
+ in rs6000_compare_op0/op1. */
+
+void
+rs6000_emit_sCOND (code, result)
+ enum rtx_code code;
+ rtx result;
+{
+ rtx condition_rtx;
+ enum machine_mode op_mode;
+
+ condition_rtx = rs6000_generate_compare (code);
+
+ /* Use the mode of the compared operands; op0 may have VOIDmode
+ (e.g. a constant), so fall back to op1's mode. */
+ op_mode = GET_MODE (rs6000_compare_op0);
+ if (op_mode == VOIDmode)
+ op_mode = GET_MODE (rs6000_compare_op1);
+
+ if (TARGET_POWERPC64 && (op_mode == DImode || rs6000_compare_fp_p))
+ {
+ /* On a 64-bit target with DImode or FP operands, materialize
+ the condition in DImode and let convert_move adapt it to
+ RESULT's mode. */
+ PUT_MODE (condition_rtx, DImode);
+ convert_move (result, condition_rtx, 0);
+ }
+ else
+ {
+ PUT_MODE (condition_rtx, SImode);
+ emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
+ }
+}
+
+/* Emit a conditional branch of kind CODE to the label LOC, testing
+ the comparison operands previously stored in rs6000_compare_op0/op1. */
+
+void
+rs6000_emit_cbranch (code, loc)
+ enum rtx_code code;
+ rtx loc;
+{
+ rtx cond = rs6000_generate_compare (code);
+ rtx label = gen_rtx_LABEL_REF (VOIDmode, loc);
+ rtx branch = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
+
+ emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, branch));
+}
+
+/* Return the string to output a conditional branch to LABEL, which is
+ the assembler text for the label, or NULL if the branch is really a
+ conditional return.
+
+ OP is the conditional expression. XEXP (OP, 0) is assumed to be a
+ condition code register and its mode specifies what kind of
+ comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ INSN is the insn.
+
+ The result points to a static buffer, overwritten on each call. */
+
+char *
+output_cbranch (op, label, reversed, insn)
+ rtx op;
+ const char * label;
+ int reversed;
+ rtx insn;
+{
+ static char string[64];
+ enum rtx_code code = GET_CODE (op);
+ rtx cc_reg = XEXP (op, 0);
+ enum machine_mode mode = GET_MODE (cc_reg);
+ int cc_regno = REGNO (cc_reg) - CR0_REGNO;
+ /* An insn length of 8 means the target is out of conditional-branch
+ range; see the long-branch emission at the bottom. */
+ int need_longbranch = label != NULL && get_attr_length (insn) == 8;
+ int really_reversed = reversed ^ need_longbranch;
+ char *s = string;
+ const char *ccode;
+ const char *pred;
+ rtx note;
+
+ validate_condition_mode (code, mode);
+
+ /* Work out which way this really branches. We could use
+ reverse_condition_maybe_unordered here always but this
+ makes the resulting assembler clearer. */
+ if (really_reversed)
+ code = rs6000_reverse_condition (mode, code);
+
+ switch (code)
+ {
+ /* Not all of these are actually distinct opcodes, but
+ we distinguish them for clarity of the resulting assembler. */
+ case NE: case LTGT:
+ ccode = "ne"; break;
+ case EQ: case UNEQ:
+ ccode = "eq"; break;
+ case GE: case GEU:
+ ccode = "ge"; break;
+ case GT: case GTU: case UNGT:
+ ccode = "gt"; break;
+ case LE: case LEU:
+ ccode = "le"; break;
+ case LT: case LTU: case UNLT:
+ ccode = "lt"; break;
+ case UNORDERED: ccode = "un"; break;
+ case ORDERED: ccode = "nu"; break;
+ case UNGE: ccode = "nl"; break;
+ case UNLE: ccode = "ng"; break;
+ default:
+ abort ();
+ }
+
+ /* Maybe we have a guess as to how likely the branch is.
+ The old mnemonics don't have a way to specify this information. */
+ note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
+ if (note != NULL_RTX)
+ {
+ /* PROB is the difference from 50%. */
+ int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;
+
+ /* For branches that are very close to 50%, assume not-taken. */
+ if (abs (prob) > REG_BR_PROB_BASE / 20
+ && ((prob > 0) ^ need_longbranch))
+ pred = "+";
+ else
+ pred = "-";
+ }
+ else
+ pred = "";
+
+ /* NOTE(review): the "{old|new}" braces in the templates select
+ between the mnemonic flavors (cf. TARGET_NEW_MNEMONICS usage
+ elsewhere in this file). A NULL label means a conditional
+ return via the link register. */
+ if (label == NULL)
+ s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
+ else
+ s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);
+
+ /* We need to escape any '%' characters in the reg_names string.
+ Assume they'd only be the first character... */
+ if (reg_names[cc_regno + CR0_REGNO][0] == '%')
+ *s++ = '%';
+ s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
+
+ if (label != NULL)
+ {
+ /* If the branch distance was too far, we may have to use an
+ unconditional branch to go the distance. */
+ if (need_longbranch)
+ s += sprintf (s, ",$+8\n\tb %s", label);
+ else
+ s += sprintf (s, ",%s", label);
+ }
+
+ return string;
+}
+
+/* Emit a conditional move: move TRUE_COND to DEST if OP of the
+ operands of the last comparison is nonzero/true, FALSE_COND if it
+ is zero/false. Return 0 if the hardware has no such operation.
+
+ The implementation is built on fsel, which selects its second
+ operand when the first is >= 0.0 and its third otherwise -- a NaN
+ first operand selects the third. Hence only FP comparisons are
+ handled, and every remaining comparison is reduced to a single GE
+ against zero (possibly with an auxiliary inner fsel). */
+
+int
+rs6000_emit_cmove (dest, op, true_cond, false_cond)
+ rtx dest;
+ rtx op;
+ rtx true_cond;
+ rtx false_cond;
+{
+ enum rtx_code code = GET_CODE (op);
+ rtx op0 = rs6000_compare_op0;
+ rtx op1 = rs6000_compare_op1;
+ REAL_VALUE_TYPE c1;
+ enum machine_mode mode = GET_MODE (op0);
+ rtx temp;
+
+ /* First, work out if the hardware can do this at all, or
+ if it's too slow... */
+ /* If the comparison is an integer one, since we only have fsel
+ it'll be cheaper to use a branch. */
+ if (! rs6000_compare_fp_p)
+ return 0;
+
+ /* Eliminate half of the comparisons by switching operands, this
+ makes the remaining code simpler. */
+ if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
+ || code == LTGT || code == LT)
+ {
+ code = reverse_condition_maybe_unordered (code);
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ }
+
+ /* UNEQ and LTGT take four instructions for a comparison with zero,
+ it'll probably be faster to use a branch here too. */
+ if (code == UNEQ)
+ return 0;
+
+ if (GET_CODE (op1) == CONST_DOUBLE)
+ REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
+
+ /* We're going to try to implement comparisons by performing
+ a subtract, then comparing against zero. Unfortunately,
+ Inf - Inf is NaN which is not zero, and so if we don't
+ know that the operand is finite and the comparison
+ would treat EQ different to UNORDERED, we can't do it. */
+ if (! flag_unsafe_math_optimizations
+ && code != GT && code != UNGE
+ && (GET_CODE (op1) != CONST_DOUBLE || target_isinf (c1))
+ /* Constructs of the form (a OP b ? a : b) are safe. */
+ && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
+ || (! rtx_equal_p (op0, true_cond)
+ && ! rtx_equal_p (op1, true_cond))))
+ return 0;
+ /* At this point we know we can use fsel. */
+
+ /* Reduce the comparison to a comparison against zero. */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_MINUS (mode, op0, op1)));
+ op0 = temp;
+ op1 = CONST0_RTX (mode);
+
+ /* If we don't care about NaNs we can reduce some of the comparisons
+ down to faster ones. */
+ if (flag_unsafe_math_optimizations)
+ switch (code)
+ {
+ case GT:
+ code = LE;
+ temp = true_cond;
+ true_cond = false_cond;
+ false_cond = temp;
+ break;
+ case UNGE:
+ code = GE;
+ break;
+ case UNEQ:
+ code = EQ;
+ break;
+ default:
+ break;
+ }
+
+ /* Now, reduce everything down to a GE. */
+ switch (code)
+ {
+ case GE:
+ break;
+
+ case LE:
+ /* x <= 0 iff -x >= 0. */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ op0 = temp;
+ break;
+
+ case ORDERED:
+ /* !isnan (x) iff fabs (x) >= 0 (fsel's GE is false for NaN). */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (mode, op0)));
+ op0 = temp;
+ break;
+
+ case EQ:
+ /* x == 0 iff -fabs (x) >= 0. */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_NEG (mode,
+ gen_rtx_ABS (mode, op0))));
+ op0 = temp;
+ break;
+
+ case UNGE:
+ /* x UNGE 0 <-> (x GE 0 || -x UNLE 0). TEMP holds the ordered
+ x >= 0 selection; the outer fsel on -x picks TEMP when
+ -x >= 0 (x <= 0) and the original TRUE_COND when -x < 0 or
+ unordered (x > 0, or NaN, which must yield TRUE_COND).
+ FIX: the two assignments below were previously reversed
+ (false_cond = temp; true_cond = false_cond;), which made both
+ arms TEMP and selected FALSE_COND for NaN inputs. */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ false_cond = true_cond;
+ true_cond = temp;
+
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ op0 = temp;
+ break;
+
+ case GT:
+ /* x GT 0 <-> (x GE 0 && -x UNLT 0). TEMP holds the x >= 0
+ selection; the outer fsel on -x must pick the original
+ FALSE_COND when -x >= 0 (x <= 0) and TEMP otherwise.
+ FIX: the two assignments below were previously reversed
+ (true_cond = temp; false_cond = true_cond;), which made both
+ arms TEMP and selected TRUE_COND for x == 0. */
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_IF_THEN_ELSE (mode,
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ true_cond = false_cond;
+ false_cond = temp;
+
+ temp = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (mode, op0)));
+ op0 = temp;
+ break;
+
+ default:
+ abort ();
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, dest,
+ gen_rtx_IF_THEN_ELSE (GET_MODE (dest),
+ gen_rtx_GE (VOIDmode,
+ op0, op1),
+ true_cond, false_cond)));
+ return 1;
+}
+
+/* Emit RTL computing the min or max of OP0 and OP1 into DEST.
+ CODE selects the operation: SMAX/UMAX pick the GE operand, any
+ other code picks the other one. Aborts if emit_conditional_move
+ cannot produce the move. */
+
+void
+rs6000_emit_minmax (dest, code, op0, op1)
+ rtx dest;
+ enum rtx_code code;
+ rtx op0;
+ rtx op1;
+{
+ enum machine_mode mode = GET_MODE (op0);
+ int want_max = (code == SMAX || code == UMAX);
+ rtx result;
+
+ /* dest = (op0 >= op1) ? a : b, with (a, b) ordered by WANT_MAX. */
+ result = emit_conditional_move (dest, GE, op0, op1, mode,
+ want_max ? op0 : op1,
+ want_max ? op1 : op0, mode, 0);
+ if (result == NULL_RTX)
+ abort ();
+ if (result != dest)
+ emit_move_insn (dest, result);
+}
+
+/* This page contains routines that are used to determine what the
+ function prologue and epilogue code will do and write them out. */
+
+/* Return the first fixed-point register that is required to be
+ saved. 32 if none. */
+
+int
+first_reg_to_save ()
+{
+ int first_reg;
+
+ /* Find lowest numbered live register. GPRs 13..31 are scanned; a
+ register needs saving when it is live and not call-used, or when
+ it is the PIC register under V.4 small PIC or Darwin PIC (it is
+ call-used but must survive here). */
+ for (first_reg = 13; first_reg <= 31; first_reg++)
+ if (regs_ever_live[first_reg]
+ && (! call_used_regs[first_reg]
+ || (first_reg == PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic)))))
+ break;
+
+ if (current_function_profile)
+ {
+ /* AIX must save/restore every register that contains a parameter
+ before/after the .__mcount call plus an additional register
+ for the static chain, if needed; use registers from 30 down to 22
+ to do this. */
+ if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_DARWIN)
+ {
+ int last_parm_reg, profile_first_reg;
+
+ /* Figure out last used parameter register. The proper thing
+ to do is to walk incoming args of the function. A function
+ might have live parameter registers even if it has no
+ incoming args. */
+ for (last_parm_reg = 10;
+ last_parm_reg > 2 && ! regs_ever_live [last_parm_reg];
+ last_parm_reg--)
+ ;
+
+ /* Calculate first reg for saving parameter registers
+ and static chain.
+ Skip reg 31 which may contain the frame pointer. */
+ profile_first_reg = (33 - last_parm_reg
+ - (current_function_needs_context ? 1 : 0));
+#if TARGET_MACHO
+ /* Need to skip another reg to account for R31 being PICBASE
+ (when flag_pic is set) or R30 being used as the frame
+ pointer (when flag_pic is not set). */
+ --profile_first_reg;
+#endif
+ /* Do not save frame pointer if no parameters needs to be saved. */
+ if (profile_first_reg == 31)
+ profile_first_reg = 32;
+
+ /* Widen the save range if profiling needs more registers
+ saved than the liveness scan found. */
+ if (first_reg > profile_first_reg)
+ first_reg = profile_first_reg;
+ }
+
+ /* SVR4 may need one register to preserve the static chain. */
+ else if (current_function_needs_context)
+ {
+ /* Skip reg 31 which may contain the frame pointer. */
+ if (first_reg > 30)
+ first_reg = 30;
+ }
+ }
+
+#if TARGET_MACHO
+ /* Darwin PIC: make sure the PIC base register is always saved. */
+ if (flag_pic && current_function_uses_pic_offset_table &&
+ (first_reg > PIC_OFFSET_TABLE_REGNUM))
+ return PIC_OFFSET_TABLE_REGNUM;
+#endif
+
+ return first_reg;
+}
+
+/* Similar, for FP regs: return the number of the first floating-point
+ register (f14 up, i.e. hard regs 46..63) that must be saved, or 64
+ when none are live. */
+
+int
+first_fp_reg_to_save ()
+{
+ int regno;
+
+ /* FP registers occupy hard-reg numbers 32..63; f14 is the first
+ candidate. Scan upward for the lowest live one. */
+ regno = 14 + 32;
+ while (regno <= 63 && ! regs_ever_live[regno])
+ regno++;
+
+ return regno;
+}
+
+/* Similar, for AltiVec regs: return the first AltiVec register that
+ must be saved, or LAST_ALTIVEC_REGNO + 1 when none need saving
+ (always the case outside the AltiVec ABI, where the stack frame is
+ left untouched). */
+
+static int
+first_altivec_reg_to_save ()
+{
+ int regno;
+
+ /* Stack frame remains as is unless we are in AltiVec ABI. */
+ if (! TARGET_ALTIVEC_ABI)
+ return LAST_ALTIVEC_REGNO + 1;
+
+ /* v20 is the first candidate; scan upward for the lowest live
+ register. */
+ regno = FIRST_ALTIVEC_REGNO + 20;
+ while (regno <= LAST_ALTIVEC_REGNO && ! regs_ever_live[regno])
+ ++regno;
+
+ return regno;
+}
+
+/* Return a 32-bit mask of the AltiVec registers we need to set in
+ VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
+ the 32-bit word is 0. */
+
+static unsigned int
+compute_vrsave_mask ()
+{
+ unsigned int i, mask = 0;
+
+ /* First, find out if we use _any_ altivec registers. */
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (regs_ever_live[i])
+ mask |= ALTIVEC_REG_BIT (i);
+
+ /* No AltiVec registers live: VRSAVE need not be touched. */
+ if (mask == 0)
+ return mask;
+
+ /* Next, add all registers that are call-clobbered. We do this
+ because post-reload register optimizers such as regrename_optimize
+ may choose to use them. They never change the register class
+ chosen by reload, so cannot create new uses of altivec registers
+ if there were none before, so the early exit above is safe. */
+ /* ??? Alternately, we could define HARD_REGNO_RENAME_OK to disallow
+ altivec registers not saved in the mask, which might well make the
+ adjustments below more effective in eliding the save/restore of
+ VRSAVE in small functions. */
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (call_used_regs[i])
+ mask |= ALTIVEC_REG_BIT (i);
+
+ /* Next, remove the argument registers from the set. These must
+ be in the VRSAVE mask set by the caller, so we don't need to add
+ them in again. More importantly, the mask we compute here is
+ used to generate CLOBBERs in the set_vrsave insn, and we do not
+ wish the argument registers to die. */
+ /* NOTE(review): assumes args_info.vregno tracks the vector argument
+ registers allocated to this function, so everything from
+ ALTIVEC_ARG_MIN_REG up through it is an argument register --
+ confirm against CUMULATIVE_ARGS in rs6000.h. */
+ for (i = cfun->args_info.vregno; i >= ALTIVEC_ARG_MIN_REG; --i)
+ mask &= ~ALTIVEC_REG_BIT (i);
+
+ /* Similarly, remove the return value from the set. */
+ {
+ bool yes = false;
+ diddle_return_value (is_altivec_return_reg, &yes);
+ if (yes)
+ mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
+ }
+
+ return mask;
+}
+
+/* Callback for diddle_return_value: set *XYES (a bool *) when REG is
+ the AltiVec value-return register. */
+
+static void
+is_altivec_return_reg (reg, xyes)
+ rtx reg;
+ void *xyes;
+{
+ bool *found = (bool *) xyes;
+
+ if (REGNO (reg) == ALTIVEC_ARG_RETURN)
+ *found = true;
+}
+
+
+/* Calculate the stack information for the current function. This is
+ complicated by having two separate calling sequences, the AIX calling
+ sequence and the V.4 calling sequence.
+
+ AIX (and Darwin/Mac OS X) stack frames look like:
+ 32-bit 64-bit
+ SP----> +---------------------------------------+
+ | back chain to caller | 0 0
+ +---------------------------------------+
+ | saved CR | 4 8 (8-11)
+ +---------------------------------------+
+ | saved LR | 8 16
+ +---------------------------------------+
+ | reserved for compilers | 12 24
+ +---------------------------------------+
+ | reserved for binders | 16 32
+ +---------------------------------------+
+ | saved TOC pointer | 20 40
+ +---------------------------------------+
+ | Parameter save area (P) | 24 48
+ +---------------------------------------+
+ | Alloca space (A) | 24+P etc.
+ +---------------------------------------+
+ | Local variable space (L) | 24+P+A
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 24+P+A+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 24+P+A+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 24+P+A+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
+ +---------------------------------------+
+ | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for AIX configurations is two words (i.e., 8
+ or 16 bytes).
+
+
+ V.4 stack frames look like:
+
+ SP----> +---------------------------------------+
+ | back chain to caller | 0
+ +---------------------------------------+
+ | caller's saved LR | 4
+ +---------------------------------------+
+ | Parameter save area (P) | 8
+ +---------------------------------------+
+ | Alloca space (A) | 8+P
+ +---------------------------------------+
+ | Varargs save area (V) | 8+P+A
+ +---------------------------------------+
+ | Local variable space (L) | 8+P+A+V
+ +---------------------------------------+
+ | Float/int conversion temporary (X) | 8+P+A+V+L
+ +---------------------------------------+
+ | Save area for AltiVec registers (W) | 8+P+A+V+L+X
+ +---------------------------------------+
+ | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
+ +---------------------------------------+
+ | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
+ +---------------------------------------+
+ | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
+ +---------------------------------------+
+ | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
+ +---------------------------------------+
+ | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
+ +---------------------------------------+
+ old SP->| back chain to caller's caller |
+ +---------------------------------------+
+
+ The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
+ given. (But note below and in sysv4.h that we require only 8 and
+ may round up the size of our stack frame anyways. The historical
+ reason is early versions of powerpc-linux which didn't properly
+ align the stack at program startup. A happy side-effect is that
+ -mno-eabi libraries can be used with -meabi programs.)
+
+ The EABI configuration defaults to the V.4 layout, unless
+ -mcall-aix is used, in which case the AIX layout is used. However,
+ the stack alignment requirements may differ. If -mno-eabi is not
+ given, the required stack alignment is 8 bytes; if -mno-eabi is
+ given, the required alignment is 16 bytes. (But see V.4 comment
+ above.) */
+
+/* Required outgoing stack alignment, in bits (see its use with
+   BITS_PER_UNIT below); target headers may pre-define it, otherwise
+   fall back to the generic STACK_BOUNDARY.  */
+#ifndef ABI_STACK_BOUNDARY
+#define ABI_STACK_BOUNDARY STACK_BOUNDARY
+#endif
+
+/* Compute the layout of the current function's stack frame, following
+   the frame diagrams above: which registers need saving, the size of
+   each save area, and the offset of each area from the incoming stack
+   pointer.  The result lives in a function-local static that is
+   recomputed on each call; callers must not free or retain it across
+   functions.
+
+   Fixes relative to the previous revision: vrsave_size was added
+   twice when computing save_size, and the save-area alignment test
+   used the bare enum constant ABI_DARWIN (a nonzero constant, hence
+   always true) instead of comparing the current ABI against it.  */
+
+rs6000_stack_t *
+rs6000_stack_info ()
+{
+  static rs6000_stack_t info, zero_info;
+  rs6000_stack_t *info_ptr = &info;
+  int reg_size = TARGET_POWERPC64 ? 8 : 4;
+  enum rs6000_abi abi;
+  int ehrd_size;
+  int total_raw_size;
+
+  /* Zero all fields portably.  */
+  info = zero_info;
+
+  /* Select which calling sequence.  */
+  info_ptr->abi = abi = DEFAULT_ABI;
+
+  /* Calculate which registers need to be saved & save area size.  */
+  info_ptr->first_gp_reg_save = first_reg_to_save ();
+  /* Assume that we will have to save PIC_OFFSET_TABLE_REGNUM,
+     even if it currently looks like we won't.  */
+  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
+       || (flag_pic == 1 && abi == ABI_V4)
+       || (flag_pic && abi == ABI_DARWIN))
+      && info_ptr->first_gp_reg_save > PIC_OFFSET_TABLE_REGNUM)
+    info_ptr->gp_size = reg_size * (32 - PIC_OFFSET_TABLE_REGNUM);
+  else
+    info_ptr->gp_size = reg_size * (32 - info_ptr->first_gp_reg_save);
+
+  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
+  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
+
+  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
+  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
+				 - info_ptr->first_altivec_reg_save);
+
+  /* Does this function call anything?  */
+  info_ptr->calls_p = (! current_function_is_leaf
+		       || cfun->machine->ra_needs_full_frame);
+
+  /* Determine if we need to save the link register.  */
+  if (rs6000_ra_ever_killed ()
+      || (DEFAULT_ABI == ABI_AIX && current_function_profile)
+#ifdef TARGET_RELOCATABLE
+      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
+#endif
+      || (info_ptr->first_fp_reg_save != 64
+	  && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
+      || info_ptr->first_altivec_reg_save <= LAST_ALTIVEC_REGNO
+      || (abi == ABI_V4 && current_function_calls_alloca)
+      || (DEFAULT_ABI == ABI_DARWIN
+	  && flag_pic
+	  && current_function_uses_pic_offset_table)
+      || info_ptr->calls_p)
+    {
+      info_ptr->lr_save_p = 1;
+      regs_ever_live[LINK_REGISTER_REGNUM] = 1;
+    }
+
+  /* Determine if we need to save the condition code registers.  */
+  if (regs_ever_live[CR2_REGNO]
+      || regs_ever_live[CR3_REGNO]
+      || regs_ever_live[CR4_REGNO])
+    {
+      info_ptr->cr_save_p = 1;
+      if (abi == ABI_V4)
+	info_ptr->cr_size = reg_size;
+    }
+
+  /* If the current function calls __builtin_eh_return, then we need
+     to allocate stack space for registers that will hold data for
+     the exception handler.  */
+  if (current_function_calls_eh_return)
+    {
+      unsigned int i;
+      /* Count the EH data registers; the loop body is empty.  */
+      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
+	continue;
+      ehrd_size = i * UNITS_PER_WORD;
+    }
+  else
+    ehrd_size = 0;
+
+  /* Determine various sizes.  */
+  info_ptr->reg_size     = reg_size;
+  info_ptr->fixed_size   = RS6000_SAVE_AREA;
+  info_ptr->varargs_size = RS6000_VARARGS_AREA;
+  info_ptr->vars_size    = RS6000_ALIGN (get_frame_size (), 8);
+  info_ptr->parm_size    = RS6000_ALIGN (current_function_outgoing_args_size,
+					 8);
+
+  if (TARGET_ALTIVEC_ABI)
+    {
+      info_ptr->vrsave_mask = compute_vrsave_mask ();
+      info_ptr->vrsave_size  = info_ptr->vrsave_mask ? 4 : 0;
+    }
+  else
+    {
+      info_ptr->vrsave_mask = 0;
+      info_ptr->vrsave_size = 0;
+    }
+
+  /* Calculate the offsets.  Offsets are negative (below the incoming
+     stack pointer) and laid out per the frame diagrams above.  */
+  switch (abi)
+    {
+    case ABI_NONE:
+    default:
+      abort ();
+
+    case ABI_AIX:
+    case ABI_AIX_NODESC:
+    case ABI_DARWIN:
+      info_ptr->fp_save_offset   = - info_ptr->fp_size;
+      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;
+
+      if (TARGET_ALTIVEC_ABI)
+	{
+	  info_ptr->vrsave_save_offset
+	    = info_ptr->gp_save_offset - info_ptr->vrsave_size;
+
+	  /* Align stack so vector save area is on a quadword boundary.  */
+	  if (info_ptr->altivec_size != 0)
+	    info_ptr->altivec_padding_size
+	      = 16 - (-info_ptr->vrsave_save_offset % 16);
+	  else
+	    info_ptr->altivec_padding_size = 0;
+
+	  info_ptr->altivec_save_offset
+	    = info_ptr->vrsave_save_offset
+	    - info_ptr->altivec_padding_size
+	    - info_ptr->altivec_size;
+
+	  /* Adjust for AltiVec case.  */
+	  info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
+	}
+      else
+	info_ptr->ehrd_offset      = info_ptr->gp_save_offset - ehrd_size;
+      /* CR and LR live in the fixed link area above the stack pointer
+	 on these ABIs, not in the save area below it.  */
+      info_ptr->cr_save_offset   = reg_size; /* first word when 64-bit.  */
+      info_ptr->lr_save_offset   = 2*reg_size;
+      break;
+
+    case ABI_V4:
+      info_ptr->fp_save_offset   = - info_ptr->fp_size;
+      info_ptr->gp_save_offset   = info_ptr->fp_save_offset - info_ptr->gp_size;
+      info_ptr->cr_save_offset   = info_ptr->gp_save_offset - info_ptr->cr_size;
+
+      if (TARGET_ALTIVEC_ABI)
+	{
+	  info_ptr->vrsave_save_offset
+	    = info_ptr->cr_save_offset - info_ptr->vrsave_size;
+
+	  /* Align stack so vector save area is on a quadword boundary.  */
+	  if (info_ptr->altivec_size != 0)
+	    info_ptr->altivec_padding_size
+	      = 16 - (-info_ptr->vrsave_save_offset % 16);
+	  else
+	    info_ptr->altivec_padding_size = 0;
+
+	  info_ptr->altivec_save_offset
+	    = info_ptr->vrsave_save_offset
+	    - info_ptr->altivec_padding_size
+	    - info_ptr->altivec_size;
+
+	  /* Adjust for AltiVec case.  */
+	  info_ptr->toc_save_offset
+	    = info_ptr->altivec_save_offset - info_ptr->toc_size;
+	}
+      else
+	info_ptr->toc_save_offset  = info_ptr->cr_save_offset - info_ptr->toc_size;
+      info_ptr->ehrd_offset      = info_ptr->toc_save_offset - ehrd_size;
+      info_ptr->lr_save_offset   = reg_size;
+      break;
+    }
+
+  /* Total the save areas and round up to the ABI's save-area
+     alignment.  Each component is counted exactly once (vrsave_size
+     used to appear twice in this sum).  NOTE(review): lr_size and
+     toc_size are not assigned in this function -- presumably set up
+     elsewhere before use; confirm.  */
+  info_ptr->save_size    = RS6000_ALIGN (info_ptr->fp_size
+					 + info_ptr->gp_size
+					 + info_ptr->altivec_size
+					 + info_ptr->altivec_padding_size
+					 + info_ptr->vrsave_size
+					 + ehrd_size
+					 + info_ptr->cr_size
+					 + info_ptr->lr_size
+					 + info_ptr->toc_size,
+					 (TARGET_ALTIVEC_ABI || abi == ABI_DARWIN)
+					 ? 16 : 8);
+
+  total_raw_size	 = (info_ptr->vars_size
+			    + info_ptr->parm_size
+			    + info_ptr->save_size
+			    + info_ptr->varargs_size
+			    + info_ptr->fixed_size);
+
+  info_ptr->total_size =
+    RS6000_ALIGN (total_raw_size, ABI_STACK_BOUNDARY / BITS_PER_UNIT);
+
+  /* Determine if we need to allocate any stack frame:
+
+     For AIX we need to push the stack if a frame pointer is needed
+     (because the stack might be dynamically adjusted), if we are
+     debugging, if we make calls, or if the sum of fp_save, gp_save,
+     and local variables are more than the space needed to save all
+     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
+     + 18*8 = 288 (GPR13 reserved).
+
+     For V.4 we don't have the stack cushion that AIX uses, but assume
+     that the debugger can handle stackless frames.  */
+
+  if (info_ptr->calls_p)
+    info_ptr->push_p = 1;
+
+  else if (abi == ABI_V4)
+    info_ptr->push_p = (total_raw_size > info_ptr->fixed_size
+			|| info_ptr->calls_p);
+
+  else
+    info_ptr->push_p = (frame_pointer_needed
+			|| (abi != ABI_DARWIN && write_symbols != NO_DEBUG)
+			|| ((total_raw_size - info_ptr->fixed_size)
+			    > (TARGET_32BIT ? 220 : 288)));
+
+  /* Zero offsets if we're not saving those registers.  */
+  if (info_ptr->fp_size == 0)
+    info_ptr->fp_save_offset = 0;
+
+  if (info_ptr->gp_size == 0)
+    info_ptr->gp_save_offset = 0;
+
+  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
+    info_ptr->altivec_save_offset = 0;
+
+  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
+    info_ptr->vrsave_save_offset = 0;
+
+  if (! info_ptr->lr_save_p)
+    info_ptr->lr_save_offset = 0;
+
+  if (! info_ptr->cr_save_p)
+    info_ptr->cr_save_offset = 0;
+
+  if (! info_ptr->toc_save_p)
+    info_ptr->toc_save_offset = 0;
+
+  return info_ptr;
+}
+
+/* Dump the stack-frame layout computed by rs6000_stack_info to
+   stderr, for debugging.  INFO may be NULL, in which case the layout
+   for the current function is computed and printed.  Fields still at
+   their "nothing saved" defaults are omitted.  */
+void
+debug_stack_info (info)
+     rs6000_stack_t *info;
+{
+  const char *abi_string;
+
+  if (! info)
+    info = rs6000_stack_info ();
+
+  fprintf (stderr, "\nStack information for function %s:\n",
+	   ((current_function_decl && DECL_NAME (current_function_decl))
+	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
+	    : "<unknown>"));
+
+  switch (info->abi)
+    {
+    default:		 abi_string = "Unknown";	break;
+    case ABI_NONE:	 abi_string = "NONE";		break;
+    case ABI_AIX:
+    case ABI_AIX_NODESC: abi_string = "AIX";		break;
+    case ABI_DARWIN:	 abi_string = "Darwin";		break;
+    case ABI_V4:	 abi_string = "V.4";		break;
+    }
+
+  fprintf (stderr, "\tABI                 = %5s\n", abi_string);
+
+  if (TARGET_ALTIVEC_ABI)
+    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
+
+  /* 32 / 64 are the "no GPR/FPR saved" sentinels; see
+     first_reg_to_save / first_fp_reg_to_save.  */
+  if (info->first_gp_reg_save != 32)
+    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);
+
+  if (info->first_fp_reg_save != 64)
+    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);
+
+  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
+    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
+	     info->first_altivec_reg_save);
+
+  if (info->lr_save_p)
+    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);
+
+  if (info->cr_save_p)
+    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);
+
+  if (info->toc_save_p)
+    fprintf (stderr, "\ttoc_save_p          = %5d\n", info->toc_save_p);
+
+  if (info->vrsave_mask)
+    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);
+
+  if (info->push_p)
+    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);
+
+  if (info->calls_p)
+    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);
+
+  if (info->gp_save_offset)
+    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);
+
+  if (info->fp_save_offset)
+    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);
+
+  if (info->altivec_save_offset)
+    fprintf (stderr, "\taltivec_save_offset = %5d\n",
+	     info->altivec_save_offset);
+
+  if (info->vrsave_save_offset)
+    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
+	     info->vrsave_save_offset);
+
+  if (info->lr_save_offset)
+    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);
+
+  if (info->cr_save_offset)
+    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);
+
+  if (info->toc_save_offset)
+    fprintf (stderr, "\ttoc_save_offset     = %5d\n", info->toc_save_offset);
+
+  if (info->varargs_save_offset)
+    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
+
+  if (info->total_size)
+    fprintf (stderr, "\ttotal_size          = %5d\n", info->total_size);
+
+  if (info->varargs_size)
+    fprintf (stderr, "\tvarargs_size        = %5d\n", info->varargs_size);
+
+  if (info->vars_size)
+    fprintf (stderr, "\tvars_size           = %5d\n", info->vars_size);
+
+  if (info->parm_size)
+    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);
+
+  if (info->fixed_size)
+    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);
+
+  if (info->gp_size)
+    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);
+
+  if (info->fp_size)
+    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);
+
+  if (info->altivec_size)
+    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);
+
+  if (info->vrsave_size)
+    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);
+
+  if (info->altivec_padding_size)
+    fprintf (stderr, "\taltivec_padding_size= %5d\n",
+	     info->altivec_padding_size);
+
+  if (info->lr_size)
+    fprintf (stderr, "\tlr_size             = %5d\n", info->lr_size);
+
+  if (info->cr_size)
+    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);
+
+  if (info->toc_size)
+    fprintf (stderr, "\ttoc_size            = %5d\n", info->toc_size);
+
+  if (info->save_size)
+    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);
+
+  if (info->reg_size != 4)
+    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);
+
+  fprintf (stderr, "\n");
+}
+
+/* Return an rtx for the return address COUNT frames up from the
+   current one; FRAME is that frame's frame pointer (the machinery
+   behind __builtin_return_address).  */
+rtx
+rs6000_return_addr (count, frame)
+     int count;
+     rtx frame;
+{
+  /* Currently we don't optimize very well between prolog and body
+     code and for PIC code the code can be actually quite bad, so
+     don't try to be too clever here.  */
+  if (count != 0
+      || flag_pic != 0
+      || DEFAULT_ABI == ABI_AIX
+      || DEFAULT_ABI == ABI_AIX_NODESC)
+    {
+      /* Force a full frame so the saved LR actually lands on the
+	 stack, then load the back-chain word of FRAME and read the
+	 return address at RETURN_ADDRESS_OFFSET within it.  */
+      cfun->machine->ra_needs_full_frame = 1;
+
+      return
+	gen_rtx_MEM
+	  (Pmode,
+	   memory_address
+	   (Pmode,
+	    plus_constant (copy_to_reg
+			   (gen_rtx_MEM (Pmode,
+					 memory_address (Pmode, frame))),
+			   RETURN_ADDRESS_OFFSET)));
+    }
+
+  /* Easy case: use the pseudo holding the link register's value on
+     entry to the function.  */
+  return get_hard_reg_initial_val (Pmode, LINK_REGISTER_REGNUM);
+}
+
+/* Return nonzero if the link register may have been clobbered in the
+   current function, i.e. whether its incoming value must be saved by
+   the prologue.  */
+static int
+rs6000_ra_ever_killed ()
+{
+  rtx top;
+
+#ifdef ASM_OUTPUT_MI_THUNK
+  /* Thunks never need LR saved.  */
+  if (current_function_is_thunk)
+    return 0;
+#endif
+  /* If LR's entry value was never captured in a pseudo, or a full
+     frame is required anyway, fall back to the conservative
+     liveness bit.  */
+  if (!has_hard_reg_initial_val (Pmode, LINK_REGISTER_REGNUM)
+      || cfun->machine->ra_needs_full_frame)
+    return regs_ever_live[LINK_REGISTER_REGNUM];
+
+  /* Otherwise scan the entire insn stream for an explicit set of LR.  */
+  push_topmost_sequence ();
+  top = get_insns ();
+  pop_topmost_sequence ();
+
+  return reg_set_between_p (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
+			    top, NULL_RTX);
+}
+
+/* Add a REG_MAYBE_DEAD note to INSN, telling flow analysis that the
+   value INSN computes may never be used, so the insn may be deleted
+   without complaint.  */
+static void
+rs6000_maybe_dead (insn)
+     rtx insn;
+{
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD,
+					const0_rtx,
+					REG_NOTES (insn));
+}
+
+/* Emit instructions needed to load the TOC register.
+   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
+   a constant pool; or for SVR4 -fpic.  FROMPROLOG is nonzero when
+   called while emitting the prologue, in which case the fixed
+   registers LR and r0 are used as temporaries instead of fresh
+   pseudos.  */
+
+void
+rs6000_emit_load_toc_table (fromprolog)
+     int fromprolog;
+{
+  rtx dest;
+  dest = gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM);
+
+  if (TARGET_ELF && DEFAULT_ABI != ABI_AIX)
+    {
+      /* SVR4 small-model PIC: a single load_toc_v4_pic insn computes
+	 the TOC pointer.  */
+      if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+	{
+	  rtx temp = (fromprolog
+		      ? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+		      : gen_reg_rtx (Pmode));
+	  rs6000_maybe_dead (emit_insn (gen_load_toc_v4_pic_si (temp)));
+	  rs6000_maybe_dead (emit_move_insn (dest, temp));
+	}
+      else if (flag_pic == 2)
+	{
+	  char buf[30];
+	  rtx tempLR = (fromprolog
+			? gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+			: gen_reg_rtx (Pmode));
+	  rtx temp0 = (fromprolog
+		       ? gen_rtx_REG (Pmode, 0)
+		       : gen_reg_rtx (Pmode));
+	  rtx symF;
+
+	  /* Possibly create the toc section.  */
+	  if (! toc_initialized)
+	    {
+	      toc_section ();
+	      function_section (current_function_decl);
+	    }
+
+	  /* Large-model PIC: compute the TOC base from a local label
+	     pair (LCF/LCL in the prologue, LCG plus the TOC symbol
+	     elsewhere) using the load_toc_v4_PIC_* patterns.  */
+	  if (fromprolog)
+	    {
+	      rtx symL;
+
+	      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
+	      symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+	      ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
+	      symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+	      rs6000_maybe_dead (emit_insn (gen_load_toc_v4_PIC_1 (tempLR,
+								   symF)));
+	      rs6000_maybe_dead (emit_move_insn (dest, tempLR));
+	      rs6000_maybe_dead (emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest,
+								   symL,
+								   symF)));
+	    }
+	  else
+	    {
+	      rtx tocsym;
+	      static int reload_toc_labelno = 0;
+
+	      tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
+
+	      ASM_GENERATE_INTERNAL_LABEL (buf, "LCG", reload_toc_labelno++);
+	      symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+	      rs6000_maybe_dead (emit_insn (gen_load_toc_v4_PIC_1b (tempLR,
+								    symF,
+								    tocsym)));
+	      rs6000_maybe_dead (emit_move_insn (dest, tempLR));
+	      rs6000_maybe_dead (emit_move_insn (temp0,
+						 gen_rtx_MEM (Pmode, dest)));
+	    }
+	  rs6000_maybe_dead (emit_insn (gen_addsi3 (dest, temp0, dest)));
+	}
+      else if (flag_pic == 0 && TARGET_MINIMAL_TOC)
+	{
+	  /* This is for AIX code running in non-PIC ELF.  */
+	  char buf[30];
+	  rtx realsym;
+	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
+	  realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+
+	  rs6000_maybe_dead (emit_insn (gen_elf_high (dest, realsym)));
+	  rs6000_maybe_dead (emit_insn (gen_elf_low (dest, dest, realsym)));
+	}
+      else
+	abort ();
+    }
+  else
+    {
+      /* AIX-style TOC: use the load_toc_aix_{si,di} pattern.  */
+      if (TARGET_32BIT)
+	rs6000_maybe_dead (emit_insn (gen_load_toc_aix_si (dest)));
+      else
+	rs6000_maybe_dead (emit_insn (gen_load_toc_aix_di (dest)));
+    }
+}
+
+/* Return the alias set used for TOC references, creating it lazily on
+   first use so TOC loads are distinguished from other memory.  */
+int
+get_TOC_alias_set ()
+{
+  static int set = -1;
+  if (set == -1)
+    set = new_alias_set ();
+  return set;
+}
+
+/* This returns nonzero if the current function uses the TOC.  This is
+   determined by the presence of (unspec ... 7), which is generated by
+   the various load_toc_* patterns.  */
+
+int
+uses_TOC ()
+{
+  rtx insn;
+
+  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+    if (INSN_P (insn))
+      {
+	rtx pat = PATTERN (insn);
+	int i;
+
+	/* The load_toc_* patterns are PARALLELs containing an UNSPEC
+	   whose number is 7.  */
+	if (GET_CODE (pat) == PARALLEL)
+	  for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
+	    if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == UNSPEC
+		&& XINT (XVECEXP (PATTERN (insn), 0, i), 1) == 7)
+	      return 1;
+      }
+  return 0;
+}
+
+/* Return a (plus TOC_REGISTER (const (minus SYMBOL toc_label))) rtx,
+   i.e. SYMBOL's address expressed relative to the TOC base.  */
+rtx
+create_TOC_reference (symbol)
+    rtx symbol;
+{
+  return gen_rtx_PLUS (Pmode,
+	   gen_rtx_REG (Pmode, TOC_REGISTER),
+	     gen_rtx_CONST (Pmode,
+	       gen_rtx_MINUS (Pmode, symbol,
+		 gen_rtx_SYMBOL_REF (Pmode, toc_label_name))));
+}
+
+#if TARGET_AIX
+/* __throw will restore its own return address to be the same as the
+   return address of the function that the throw is being made to.
+   This is unfortunate, because we want to check the original
+   return address to see if we need to restore the TOC.
+   So we have to squirrel it away here.
+   This is used only in compiling __throw and __rethrow.
+
+   Most of this code should be removed by CSE.  */
+static rtx insn_after_throw;
+
+/* This does the saving: capture, in INSN_AFTER_THROW, the instruction
+   word found at the caller's return address.  */
+void
+rs6000_aix_emit_builtin_unwind_init ()
+{
+  rtx mem;
+  rtx stack_top = gen_reg_rtx (Pmode);
+  rtx opcode_addr = gen_reg_rtx (Pmode);
+
+  insn_after_throw = gen_reg_rtx (SImode);
+
+  /* Load the caller's frame address from the back-chain word.  */
+  mem = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+  emit_move_insn (stack_top, mem);
+
+  /* Fetch the word two pointers above that frame -- NOTE(review):
+     presumably the saved-LR slot of the AIX link area, giving the
+     return address -- then the instruction stored at that address.  */
+  mem = gen_rtx_MEM (Pmode,
+		     gen_rtx_PLUS (Pmode, stack_top,
+				   GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+  emit_move_insn (opcode_addr, mem);
+  emit_move_insn (insn_after_throw, gen_rtx_MEM (SImode, opcode_addr));
+}
+
+/* Emit insns to _restore_ the TOC register, at runtime (specifically
+   in _eh.o).  Only used on AIX.
+
+   The idea is that on AIX, function calls look like this:
+	bl  somefunction-trampoline
+	lwz r2,20(sp)
+
+   and later,
+	somefunction-trampoline:
+	stw r2,20(sp)
+	 ... load function address in the count register ...
+	bctr
+   or like this, if the linker determines that this is not a cross-module call
+   and so the TOC need not be restored:
+	bl somefunction
+	nop
+   or like this, if the compiler could determine that this is not a
+   cross-module call:
+	bl somefunction
+   now, the tricky bit here is that register 2 is saved and restored
+   by the _linker_, so we can't readily generate debugging information
+   for it.  So we need to go back up the call chain looking at the
+   insns at return addresses to see which calls saved the TOC register
+   and so see where it gets restored from.
+
+   Oh, and all this gets done in RTL inside the eh_epilogue pattern,
+   just before the actual epilogue.
+
+   On the bright side, this incurs no space or time overhead unless an
+   exception is thrown, except for the extra code in libgcc.a.
+
+   The parameter STACKSIZE is a register containing (at runtime)
+   the amount to be popped off the stack in addition to the stack frame
+   of this routine (which will be __throw or __rethrow, and so is
+   guaranteed to have a stack frame).  */
+
+void
+rs6000_emit_eh_toc_restore (stacksize)
+     rtx stacksize;
+{
+  rtx top_of_stack;
+  rtx bottom_of_stack = gen_reg_rtx (Pmode);
+  rtx tocompare = gen_reg_rtx (SImode);
+  rtx opcode = gen_reg_rtx (SImode);
+  rtx opcode_addr = gen_reg_rtx (Pmode);
+  rtx mem;
+  rtx loop_start = gen_label_rtx ();
+  rtx no_toc_restore_needed = gen_label_rtx ();
+  rtx loop_exit = gen_label_rtx ();
+
+  /* Start the walk at our own frame's back chain; stop when we reach
+     the frame STACKSIZE bytes above it.  */
+  mem = gen_rtx_MEM (Pmode, hard_frame_pointer_rtx);
+  set_mem_alias_set (mem, rs6000_sr_alias_set);
+  emit_move_insn (bottom_of_stack, mem);
+
+  top_of_stack = expand_binop (Pmode, add_optab,
+			       bottom_of_stack, stacksize,
+			       NULL_RTX, 1, OPTAB_WIDEN);
+
+  /* The instruction the linker inserts after a cross-module call:
+     "lwz r2,20(r1)" (32-bit) or "ld r2,40(r1)" (64-bit) -- see the
+     call sequences in the comment above.  */
+  emit_move_insn (tocompare,
+		  GEN_INT (trunc_int_for_mode (TARGET_32BIT
+					       ? 0x80410014
+					       : 0xE8410028, SImode)));
+
+  if (insn_after_throw == NULL_RTX)
+    abort ();
+  emit_move_insn (opcode, insn_after_throw);
+
+  emit_note (NULL, NOTE_INSN_LOOP_BEG);
+  emit_label (loop_start);
+
+  /* If the insn after this frame's return address is not a TOC
+     restore, this call did not save r2; skip the reload.  */
+  do_compare_rtx_and_jump (opcode, tocompare, NE, 1,
+			   SImode, NULL_RTX, NULL_RTX,
+			   no_toc_restore_needed);
+
+  /* Reload r2 from the TOC save slot: offset 5 * pointer-size is 20
+     (32-bit) or 40 (64-bit), matching the restore insn above.  */
+  mem = gen_rtx_MEM (Pmode,
+		     gen_rtx_PLUS (Pmode, bottom_of_stack,
+				   GEN_INT (5 * GET_MODE_SIZE (Pmode))));
+  emit_move_insn (gen_rtx_REG (Pmode, 2), mem);
+
+  emit_label (no_toc_restore_needed);
+  do_compare_rtx_and_jump (top_of_stack, bottom_of_stack, EQ, 1,
+			   Pmode, NULL_RTX, NULL_RTX,
+			   loop_exit);
+
+  /* Walk one frame up the back chain and fetch the opcode at that
+     frame's return address for the next iteration.  */
+  mem = gen_rtx_MEM (Pmode, bottom_of_stack);
+  set_mem_alias_set (mem, rs6000_sr_alias_set);
+  emit_move_insn (bottom_of_stack, mem);
+
+  mem = gen_rtx_MEM (Pmode,
+		     gen_rtx_PLUS (Pmode, bottom_of_stack,
+				   GEN_INT (2 * GET_MODE_SIZE (Pmode))));
+  emit_move_insn (opcode_addr, mem);
+  emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
+
+  emit_note (NULL, NOTE_INSN_LOOP_CONT);
+  emit_jump (loop_start);
+  emit_note (NULL, NOTE_INSN_LOOP_END);
+  emit_label (loop_exit);
+}
+#endif /* TARGET_AIX */
+
+/* This ties together stack memory (MEM with an alias set of
+   rs6000_sr_alias_set) and the change to the stack pointer: the
+   stack_tie insn references a BLKmode MEM at the stack pointer, so
+   register saves in the frame cannot be reordered relative to stack
+   pointer updates.  */
+
+static void
+rs6000_emit_stack_tie ()
+{
+  rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));
+
+  set_mem_alias_set (mem, rs6000_sr_alias_set);
+  emit_insn (gen_stack_tie (mem));
+}
+
+/* Emit the correct code for allocating stack space, as insns.
+   If COPY_R12, make sure a copy of the old frame is left in r12.
+   The generated code may use hard register 0 as a temporary.  */
+
+static void
+rs6000_emit_allocate_stack (size, copy_r12)
+     HOST_WIDE_INT size;
+     int copy_r12;
+{
+  rtx insn;
+  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
+  rtx todec = GEN_INT (-size);
+
+  if (current_function_limit_stack)
+    {
+      /* -fstack-limit-register: trap if the new stack pointer would
+	 fall below the limit register.  */
+      if (REG_P (stack_limit_rtx)
+	  && REGNO (stack_limit_rtx) > 1
+	  && REGNO (stack_limit_rtx) <= 31)
+	{
+	  emit_insn (Pmode == SImode
+		     ? gen_addsi3 (tmp_reg,
+				   stack_limit_rtx,
+				   GEN_INT (size))
+		     : gen_adddi3 (tmp_reg,
+				   stack_limit_rtx,
+				   GEN_INT (size)));
+
+	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+				    const0_rtx));
+	}
+      /* -fstack-limit-symbol: materialize limit+size via lis/ori and
+	 trap likewise (only doable on 32-bit SVR4 ELF).  */
+      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
+	       && TARGET_32BIT
+	       && DEFAULT_ABI == ABI_V4)
+	{
+	  rtx toload = gen_rtx_CONST (VOIDmode,
+				      gen_rtx_PLUS (Pmode,
+						    stack_limit_rtx,
+						    GEN_INT (size)));
+
+	  emit_insn (gen_elf_high (tmp_reg, toload));
+	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
+	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
+				    const0_rtx));
+	}
+      else
+	warning ("stack limit expression is not supported");
+    }
+
+  /* Without a store-with-update insn we also need r12 to hold the old
+     stack pointer so the back chain can be stored afterwards.  */
+  if (copy_r12 || ! TARGET_UPDATE)
+    emit_move_insn (gen_rtx_REG (Pmode, 12), stack_reg);
+
+  if (TARGET_UPDATE)
+    {
+      if (size > 32767)
+	{
+	  /* Need a note here so that try_split doesn't get confused.  */
+	  if (get_last_insn() == NULL_RTX)
+	    emit_note (0, NOTE_INSN_DELETED);
+	  /* The decrement doesn't fit in a 16-bit displacement; load
+	     it into r0 first.  */
+	  insn = emit_move_insn (tmp_reg, todec);
+	  try_split (PATTERN (insn), insn, 0);
+	  todec = tmp_reg;
+	}
+
+      /* Store-with-update: decrement sp and store the back chain in
+	 one insn.  */
+      if (Pmode == SImode)
+	insn = emit_insn (gen_movsi_update (stack_reg, stack_reg,
+					    todec, stack_reg));
+      else
+	insn = emit_insn (gen_movdi_update (stack_reg, stack_reg,
+					    todec, stack_reg));
+    }
+  else
+    {
+      /* No update form: adjust sp, then store the old sp (saved in
+	 r12 above) as the back chain.  */
+      if (Pmode == SImode)
+	insn = emit_insn (gen_addsi3 (stack_reg, stack_reg, todec));
+      else
+	insn = emit_insn (gen_adddi3 (stack_reg, stack_reg, todec));
+      emit_move_insn (gen_rtx_MEM (Pmode, stack_reg),
+		      gen_rtx_REG (Pmode, 12));
+    }
+
+  /* Describe the stack adjustment to the unwinder.  */
+  RTX_FRAME_RELATED_P (insn) = 1;
+  REG_NOTES (insn) =
+    gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+		       gen_rtx_SET (VOIDmode, stack_reg,
+				    gen_rtx_PLUS (Pmode, stack_reg,
+						  GEN_INT (-size))),
+		       REG_NOTES (insn));
+}
+
+/* Add a RTX_FRAME_RELATED note so that dwarf2out_frame_debug_expr
+   knows that:
+
+   (mem (plus (blah) (regXX)))
+
+   is really:
+
+   (mem (plus (blah) (const VALUE_OF_REGXX))).
+
+   INSN is an AltiVec register save using [reg+reg] addressing; REG is
+   the index register and VAL the constant offset it holds.  */
+
+static void
+altivec_frame_fixup (insn, reg, val)
+     rtx insn, reg;
+     HOST_WIDE_INT val;
+{
+  rtx real;
+
+  real = copy_rtx (PATTERN (insn));
+
+  /* Substitute the known constant for the index register and attach
+     the result as the frame-debug expression.  */
+  real = replace_rtx (real, reg, GEN_INT (val));
+
+  RTX_FRAME_RELATED_P (insn) = 1;
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+					real,
+					REG_NOTES (insn));
+}
+
+/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
+   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
+   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
+   deduce these equivalences by itself so it wasn't necessary to hold
+   its hand so much.  */
+
+/* Subroutine of rs6000_frame_related: fold SET's source, destination,
+   and (for a MEM destination) its address in place, so the
+   REG_FRAME_RELATED_EXPR note carries fully simplified expressions.
+   This code used to be duplicated in both branches below.  */
+
+static void
+rs6000_simplify_frame_set (set)
+     rtx set;
+{
+  rtx temp;
+
+  temp = simplify_rtx (SET_SRC (set));
+  if (temp)
+    SET_SRC (set) = temp;
+  temp = simplify_rtx (SET_DEST (set));
+  if (temp)
+    SET_DEST (set) = temp;
+  if (GET_CODE (SET_DEST (set)) == MEM)
+    {
+      temp = simplify_rtx (XEXP (SET_DEST (set), 0));
+      if (temp)
+	XEXP (SET_DEST (set), 0) = temp;
+    }
+}
+
+static void
+rs6000_frame_related (insn, reg, val, reg2, rreg)
+     rtx insn;
+     rtx reg;
+     HOST_WIDE_INT val;
+     rtx reg2;
+     rtx rreg;
+{
+  rtx real;
+
+  real = copy_rtx (PATTERN (insn));
+
+  real = replace_rtx (real, reg,
+		      gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
+							STACK_POINTER_REGNUM),
+				    GEN_INT (val)));
+
+  /* We expect that 'real' is either a SET or a PARALLEL containing
+     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
+     are important so they all have to be marked RTX_FRAME_RELATED_P.  */
+
+  if (GET_CODE (real) == SET)
+    rs6000_simplify_frame_set (real);
+  else if (GET_CODE (real) == PARALLEL)
+    {
+      int i;
+      for (i = 0; i < XVECLEN (real, 0); i++)
+	if (GET_CODE (XVECEXP (real, 0, i)) == SET)
+	  {
+	    rtx set = XVECEXP (real, 0, i);
+
+	    rs6000_simplify_frame_set (set);
+	    RTX_FRAME_RELATED_P (set) = 1;
+	  }
+    }
+  else
+    abort ();
+
+  if (reg2 != NULL_RTX)
+    real = replace_rtx (real, reg2, rreg);
+
+  RTX_FRAME_RELATED_P (insn) = 1;
+  REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
+					real,
+					REG_NOTES (insn));
+}
+
+/* Returns an insn that has a vrsave set operation with the
+   appropriate CLOBBERs.  REG is the GPR holding the new VRSAVE value,
+   INFO describes the current frame, and EPILOGUEP is nonzero when
+   called for the epilogue.  */
+
+static rtx
+generate_set_vrsave (reg, info, epiloguep)
+     rtx reg;
+     rs6000_stack_t *info;
+     int epiloguep;
+{
+  int nclobs, i;
+  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
+  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+
+  clobs[0]
+    = gen_rtx_SET (VOIDmode,
+		   vrsave,
+		   gen_rtx_UNSPEC_VOLATILE (SImode,
+					    gen_rtvec (2, reg, vrsave),
+					    30));
+
+  nclobs = 1;
+
+  /* We need to clobber the registers in the mask so the scheduler
+     does not move sets to VRSAVE before sets of AltiVec registers.
+
+     However, if the function receives nonlocal gotos, reload will set
+     all call saved registers live.  We will end up with:
+
+     (set (reg 999) (mem))
+     (parallel [ (set (reg vrsave) (unspec blah))
+                 (clobber (reg 999))])
+
+     The clobber will cause the store into reg 999 to be dead, and
+     flow will attempt to delete an epilogue insn.  In this case, we
+     need an unspec use/set of the register.  */
+
+  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
+    /* Test the individual bit, matching the usage in
+       rs6000_emit_prologue; the old `mask != 0 && bit != 0' form
+       protected every AltiVec register whenever the mask was set.  */
+    if ((info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
+      {
+	if (!epiloguep || call_used_regs [i])
+	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
+					     gen_rtx_REG (V4SImode, i));
+	else
+	  {
+	    /* Named `vreg' so it does not shadow the REG parameter.  */
+	    rtx vreg = gen_rtx_REG (V4SImode, i);
+
+	    clobs[nclobs++]
+	      = gen_rtx_SET (VOIDmode,
+			     vreg,
+			     gen_rtx_UNSPEC (V4SImode,
+					     gen_rtvec (1, vreg), 27));
+	  }
+      }
+
+  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
+
+  for (i = 0; i < nclobs; ++i)
+    XVECEXP (insn, 0, i) = clobs[i];
+
+  return insn;
+}
+
+/* Emit function prologue as insns. */
+
+void
+rs6000_emit_prologue ()
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+ enum machine_mode reg_mode = TARGET_POWERPC64 ? DImode : SImode;
+ int reg_size = TARGET_POWERPC64 ? 8 : 4;
+ rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
+ rtx frame_ptr_rtx = gen_rtx_REG (Pmode, 12);
+ rtx frame_reg_rtx = sp_reg_rtx;
+ rtx cr_save_rtx = NULL;
+ rtx insn;
+ int saving_FPRs_inline;
+ int using_store_multiple;
+ HOST_WIDE_INT sp_offset = 0;
+
+ using_store_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+ && info->first_gp_reg_save < 31);
+ saving_FPRs_inline = (info->first_fp_reg_save == 64
+ || FP_SAVE_INLINE (info->first_fp_reg_save));
+
+ /* For V.4, update stack before we do any saving and set back pointer. */
+ if (info->push_p && DEFAULT_ABI == ABI_V4)
+ {
+ if (info->total_size < 32767)
+ sp_offset = info->total_size;
+ else
+ frame_reg_rtx = frame_ptr_rtx;
+ rs6000_emit_allocate_stack (info->total_size,
+ (frame_reg_rtx != sp_reg_rtx
+ && (info->cr_save_p
+ || info->lr_save_p
+ || info->first_fp_reg_save < 64
+ || info->first_gp_reg_save < 32
+ )));
+ if (frame_reg_rtx != sp_reg_rtx)
+ rs6000_emit_stack_tie ();
+ }
+
+ /* Save AltiVec registers if needed. */
+ if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+ {
+ int i;
+
+ /* There should be a non inline version of this, for when we
+ are saving lots of vector registers. */
+ for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+ if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+ {
+ rtx areg, savereg, mem;
+ int offset;
+
+ offset = info->altivec_save_offset + sp_offset
+ + 16 * (i - info->first_altivec_reg_save);
+
+ savereg = gen_rtx_REG (V4SImode, i);
+
+ areg = gen_rtx_REG (Pmode, 0);
+ emit_move_insn (areg, GEN_INT (offset));
+
+ /* AltiVec addressing mode is [reg+reg]. */
+ mem = gen_rtx_MEM (V4SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
+
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ insn = emit_move_insn (mem, savereg);
+
+ altivec_frame_fixup (insn, areg, offset);
+ }
+ }
+
+ /* VRSAVE is a bit vector representing which AltiVec registers
+ are used. The OS uses this to determine which vector
+ registers to save on a context switch. We need to save
+ VRSAVE on the stack frame, add whatever AltiVec registers we
+ used in this function, and do the corresponding magic in the
+ epilogue. */
+
+ if (TARGET_ALTIVEC && info->vrsave_mask != 0)
+ {
+ rtx reg, mem, vrsave;
+ int offset;
+
+ /* Get VRSAVE onto a GPR. */
+ reg = gen_rtx_REG (SImode, 12);
+ vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
+ emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
+
+ /* Save VRSAVE. */
+ offset = info->vrsave_save_offset + sp_offset;
+ mem
+ = gen_rtx_MEM (SImode,
+ gen_rtx_PLUS (Pmode, frame_reg_rtx, GEN_INT (offset)));
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+ insn = emit_move_insn (mem, reg);
+
+ /* Include the registers in the mask. */
+ emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
+
+ insn = emit_insn (generate_set_vrsave (reg, info, 0));
+ }
+
+ /* If we use the link register, get it into r0. */
+ if (info->lr_save_p)
+ emit_move_insn (gen_rtx_REG (Pmode, 0),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+
+ /* If we need to save CR, put it into r12. */
+ if (info->cr_save_p && frame_reg_rtx != frame_ptr_rtx)
+ {
+ cr_save_rtx = gen_rtx_REG (SImode, 12);
+ emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ }
+
+ /* Do any required saving of fpr's. If only one or two to save, do
+ it ourselves. Otherwise, call function. */
+ if (saving_FPRs_inline)
+ {
+ int i;
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ if ((regs_ever_live[info->first_fp_reg_save+i]
+ && ! call_used_regs[info->first_fp_reg_save+i]))
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset
+ + 8 * i));
+ mem = gen_rtx_MEM (DFmode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ }
+ else if (info->first_fp_reg_save != 64)
+ {
+ int i;
+ char rname[30];
+ const char *alloc_rname;
+ rtvec p;
+ p = rtvec_alloc (2 + 64 - info->first_fp_reg_save);
+
+ RTVEC_ELT (p, 0) = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode,
+ LINK_REGISTER_REGNUM));
+ sprintf (rname, "%s%d%s", SAVE_FP_PREFIX,
+ info->first_fp_reg_save - 32, SAVE_FP_SUFFIX);
+ alloc_rname = ggc_strdup (rname);
+ RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+ gen_rtx_SYMBOL_REF (Pmode,
+ alloc_rname));
+ for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->fp_save_offset
+ + sp_offset + 8*i));
+ mem = gen_rtx_MEM (DFmode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, i + 2) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+
+ /* Save GPRs. This is done as a PARALLEL if we are using
+ the store-multiple instructions. */
+ if (using_store_multiple)
+ {
+ rtvec p, dwarfp;
+ int i;
+ p = rtvec_alloc (32 - info->first_gp_reg_save);
+ dwarfp = rtvec_alloc (32 - info->first_gp_reg_save);
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, reg);
+ }
+ insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ else
+ {
+ int i;
+ for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+ if ((regs_ever_live[info->first_gp_reg_save+i]
+ && ! call_used_regs[info->first_gp_reg_save+i])
+ || (i+info->first_gp_reg_save == PIC_OFFSET_TABLE_REGNUM
+ && ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+ {
+ rtx addr, reg, mem;
+ reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
+ addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->gp_save_offset
+ + sp_offset
+ + reg_size * i));
+ mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ }
+
+ /* ??? There's no need to emit actual instructions here, but it's the
+ easiest way to get the frame unwind information emitted. */
+ if (current_function_calls_eh_return)
+ {
+ unsigned int i, regno;
+
+ for (i = 0; ; ++i)
+ {
+ rtx addr, reg, mem;
+
+ regno = EH_RETURN_DATA_REGNO (i);
+ if (regno == INVALID_REGNUM)
+ break;
+
+ reg = gen_rtx_REG (reg_mode, regno);
+ addr = plus_constant (frame_reg_rtx,
+ info->ehrd_offset + sp_offset
+ + reg_size * (int) i);
+ mem = gen_rtx_MEM (reg_mode, addr);
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ NULL_RTX, NULL_RTX);
+ }
+ }
+
+ /* Save lr if we used it. */
+ if (info->lr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->lr_save_offset + sp_offset));
+ rtx reg = gen_rtx_REG (Pmode, 0);
+ rtx mem = gen_rtx_MEM (Pmode, addr);
+ /* This should not be of rs6000_sr_alias_set, because of
+ __builtin_return_address. */
+
+ insn = emit_move_insn (mem, reg);
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ reg, gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+ }
+
+ /* Save CR if we use any that must be preserved. */
+ if (info->cr_save_p)
+ {
+ rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+ GEN_INT (info->cr_save_offset + sp_offset));
+ rtx mem = gen_rtx_MEM (SImode, addr);
+
+ set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+ /* If r12 was used to hold the original sp, copy cr into r0 now
+ that it's free. */
+ if (REGNO (frame_reg_rtx) == 12)
+ {
+ cr_save_rtx = gen_rtx_REG (SImode, 0);
+ emit_insn (gen_movesi_from_cr (cr_save_rtx));
+ }
+ insn = emit_move_insn (mem, cr_save_rtx);
+
+ /* Now, there's no way that dwarf2out_frame_debug_expr is going
+ to understand '(unspec:SI [(reg:CC 68) ...] 19)'. But that's
+ OK. All we have to do is specify that _one_ condition code
+ register is saved in this stack slot. The thrower's epilogue
+ will then restore all the call-saved registers.
+ We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
+ rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
+ cr_save_rtx, gen_rtx_REG (SImode, CR2_REGNO));
+ }
+
+ /* Update stack and set back pointer unless this is V.4,
+ for which it was done previously. */
+ if (info->push_p && DEFAULT_ABI != ABI_V4)
+ rs6000_emit_allocate_stack (info->total_size, FALSE);
+
+ /* Set frame pointer, if needed. */
+ if (frame_pointer_needed)
+ {
+ insn = emit_move_insn (gen_rtx_REG (reg_mode, FRAME_POINTER_REGNUM),
+ sp_reg_rtx);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ /* If we are using PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
+ if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
+ || (DEFAULT_ABI == ABI_V4 && flag_pic == 1
+ && regs_ever_live[PIC_OFFSET_TABLE_REGNUM]))
+ {
+ /* If emit_load_toc_table will use the link register, we need to save
+ it. We use R11 for this purpose because emit_load_toc_table
+ can use register 0. This allows us to use a plain 'blr' to return
+ from the procedure more often. */
+ int save_LR_around_toc_setup = (TARGET_ELF && flag_pic != 0
+ && ! info->lr_save_p
+ && EXIT_BLOCK_PTR->pred != NULL);
+ if (save_LR_around_toc_setup)
+ emit_move_insn (gen_rtx_REG (Pmode, 11),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM));
+
+ rs6000_emit_load_toc_table (TRUE);
+
+ if (save_LR_around_toc_setup)
+ emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
+ gen_rtx_REG (Pmode, 11));
+ }
+
+ if (DEFAULT_ABI == ABI_DARWIN
+ && flag_pic && current_function_uses_pic_offset_table)
+ {
+ rtx dest = gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM);
+
+ rs6000_maybe_dead (emit_insn (gen_load_macho_picbase (dest)));
+
+ rs6000_maybe_dead (
+ emit_move_insn (gen_rtx_REG (Pmode, PIC_OFFSET_TABLE_REGNUM),
+ gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)));
+ }
+}
+
+/* Write function prologue.
+
+   FILE is the assembly output stream; SIZE is the frame size computed
+   by the caller and is unused here -- the layout is recomputed via
+   rs6000_stack_info.  Besides any .extern directives needed for the
+   out-of-line FP save/restore helpers and the AIX common-mode math
+   routines, this emits the prologue insns textually when the backend
+   has no prologue expander (! HAVE_prologue).  */
+
+static void
+rs6000_output_function_prologue (file, size)
+     FILE *file;
+     HOST_WIDE_INT size ATTRIBUTE_UNUSED;
+{
+  rs6000_stack_t *info = rs6000_stack_info ();
+
+  if (TARGET_DEBUG_STACK)
+    debug_stack_info (info);
+
+  /* Write .extern for any function we will call to save and restore
+     fp values.  Only needed when some FPRs are saved and the save is
+     done out of line.  */
+  if (info->first_fp_reg_save < 64
+      && !FP_SAVE_INLINE (info->first_fp_reg_save))
+    fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
+             SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
+             RESTORE_FP_PREFIX, info->first_fp_reg_save - 32,
+             RESTORE_FP_SUFFIX);
+
+  /* Write .extern for AIX common mode routines, if needed.  Done at
+     most once per file (common_mode_defined latches).  */
+  if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
+    {
+      fputs ("\t.extern __mulh\n", file);
+      fputs ("\t.extern __mull\n", file);
+      fputs ("\t.extern __divss\n", file);
+      fputs ("\t.extern __divus\n", file);
+      fputs ("\t.extern __quoss\n", file);
+      fputs ("\t.extern __quous\n", file);
+      common_mode_defined = 1;
+    }
+
+  if (! HAVE_prologue)
+    {
+      start_sequence ();
+
+      /* A NOTE_INSN_DELETED is supposed to be at the start and end of
+         the "toplevel" insn chain.  */
+      emit_note (0, NOTE_INSN_DELETED);
+      rs6000_emit_prologue ();
+      emit_note (0, NOTE_INSN_DELETED);
+
+      if (TARGET_DEBUG_STACK)
+        debug_rtx_list (get_insns (), 100);
+      final (get_insns (), file, FALSE, FALSE);
+      end_sequence ();
+    }
+
+  rs6000_pic_labelno++;
+}
+
+/* Emit function epilogue as insns.
+
+   SIBCALL is nonzero when this epilogue precedes a sibling call; in
+   that case no return insn is emitted and the FPRs are always restored
+   inline (see the computation of restoring_FPRs_inline below).
+
+   At present, dwarf2out_frame_debug_expr doesn't understand
+   register restores, so we don't bother setting RTX_FRAME_RELATED_P
+   anywhere in the epilogue.  Most of the insns below would in any case
+   need special notes to explain where r11 is in relation to the stack.  */
+
+void
+rs6000_emit_epilogue (sibcall)
+     int sibcall;
+{
+  rs6000_stack_t *info;
+  int restoring_FPRs_inline;
+  int using_load_multiple;
+  int using_mfcr_multiple;
+  int use_backchain_to_restore_sp;
+  int sp_offset = 0;
+  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
+  rtx frame_reg_rtx = sp_reg_rtx;
+  enum machine_mode reg_mode = TARGET_POWERPC64 ? DImode : SImode;
+  int reg_size = TARGET_POWERPC64 ? 8 : 4;
+  int i;
+
+  info = rs6000_stack_info ();
+  using_load_multiple = (TARGET_MULTIPLE && ! TARGET_POWERPC64
+                         && info->first_gp_reg_save < 31);
+  restoring_FPRs_inline = (sibcall
+                           || current_function_calls_eh_return
+                           || info->first_fp_reg_save == 64
+                           || FP_SAVE_INLINE (info->first_fp_reg_save));
+  use_backchain_to_restore_sp = (frame_pointer_needed
+                                 || current_function_calls_alloca
+                                 || info->total_size > 32767);
+  /* On these processors (or when optimizing for size) prefer a single
+     PARALLEL that moves r12 into several CR fields over one insn per
+     field -- see the CR restore code below.  */
+  using_mfcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
+                         || rs6000_cpu == PROCESSOR_PPC603
+                         || rs6000_cpu == PROCESSOR_PPC750
+                         || optimize_size);
+
+  /* If we have a frame pointer, a call to alloca, or a large stack
+     frame, restore the old stack pointer using the backchain.  Otherwise,
+     we know what size to update it with.  */
+  if (use_backchain_to_restore_sp)
+    {
+      /* Under V.4, don't reset the stack pointer until after we're done
+         loading the saved registers.  */
+      if (DEFAULT_ABI == ABI_V4)
+        frame_reg_rtx = gen_rtx_REG (Pmode, 11);
+
+      emit_move_insn (frame_reg_rtx,
+                      gen_rtx_MEM (Pmode, sp_reg_rtx));
+
+    }
+  else if (info->push_p)
+    {
+      if (DEFAULT_ABI == ABI_V4)
+        sp_offset = info->total_size;
+      else
+        {
+          emit_insn (TARGET_32BIT
+                     ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+                                   GEN_INT (info->total_size))
+                     : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+                                   GEN_INT (info->total_size)));
+        }
+    }
+
+  /* Restore AltiVec registers if needed.  */
+  if (TARGET_ALTIVEC_ABI && info->altivec_size != 0)
+    {
+      int i;
+
+      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
+        if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
+          {
+            rtx addr, areg, mem;
+
+            /* The offset must be loaded into a register first because
+               AltiVec addressing mode is [reg+reg] only.  */
+            areg = gen_rtx_REG (Pmode, 0);
+            emit_move_insn
+              (areg, GEN_INT (info->altivec_save_offset
+                              + sp_offset
+                              + 16 * (i - info->first_altivec_reg_save)));
+
+            /* AltiVec addressing mode is [reg+reg].  */
+            addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
+            mem = gen_rtx_MEM (V4SImode, addr);
+            set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+            emit_move_insn (gen_rtx_REG (V4SImode, i), mem);
+          }
+    }
+
+  /* Restore VRSAVE if needed.  */
+  if (TARGET_ALTIVEC_ABI && info->vrsave_mask != 0)
+    {
+      rtx addr, mem, reg;
+
+      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                           GEN_INT (info->vrsave_save_offset + sp_offset));
+      mem = gen_rtx_MEM (SImode, addr);
+      set_mem_alias_set (mem, rs6000_sr_alias_set);
+      reg = gen_rtx_REG (SImode, 12);
+      emit_move_insn (reg, mem);
+
+      emit_insn (generate_set_vrsave (reg, info, 1));
+    }
+
+  /* Get the old lr if we saved it.  Loaded into r0 first; copied into
+     LR below so the load can overlap the other restores.  */
+  if (info->lr_save_p)
+    {
+      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                               GEN_INT (info->lr_save_offset + sp_offset));
+      rtx mem = gen_rtx_MEM (Pmode, addr);
+
+      set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+      emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
+    }
+
+  /* Get the old cr if we saved it.  Staged in r12; moved into the CR
+     fields further down.  */
+  if (info->cr_save_p)
+    {
+      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                               GEN_INT (info->cr_save_offset + sp_offset));
+      rtx mem = gen_rtx_MEM (SImode, addr);
+
+      set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+      emit_move_insn (gen_rtx_REG (SImode, 12), mem);
+    }
+
+  /* Set LR here to try to overlap restores below.  */
+  if (info->lr_save_p)
+    emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM),
+                    gen_rtx_REG (Pmode, 0));
+
+  /* Load exception handler data registers, if needed.  */
+  if (current_function_calls_eh_return)
+    {
+      unsigned int i, regno;
+
+      for (i = 0; ; ++i)
+        {
+          rtx addr, mem;
+
+          regno = EH_RETURN_DATA_REGNO (i);
+          if (regno == INVALID_REGNUM)
+            break;
+
+          addr = plus_constant (frame_reg_rtx,
+                                info->ehrd_offset + sp_offset
+                                + reg_size * (int) i);
+          mem = gen_rtx_MEM (reg_mode, addr);
+          set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+          emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
+        }
+    }
+
+  /* Restore GPRs.  This is done as a PARALLEL if we are using
+     the load-multiple instructions.  */
+  if (using_load_multiple)
+    {
+      rtvec p;
+      p = rtvec_alloc (32 - info->first_gp_reg_save);
+      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+        {
+          rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                                   GEN_INT (info->gp_save_offset
+                                            + sp_offset
+                                            + reg_size * i));
+          rtx mem = gen_rtx_MEM (reg_mode, addr);
+
+          set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+          RTVEC_ELT (p, i) =
+            gen_rtx_SET (VOIDmode,
+                         gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
+                         mem);
+        }
+      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+    }
+  else
+    /* Otherwise restore one GPR at a time; the PIC register is also
+       reloaded when small-model PIC (V.4 -fpic, or Darwin PIC) made it
+       live even though it is call-used.  */
+    for (i = 0; i < 32 - info->first_gp_reg_save; i++)
+      if ((regs_ever_live[info->first_gp_reg_save+i]
+           && ! call_used_regs[info->first_gp_reg_save+i])
+          || (i+info->first_gp_reg_save == PIC_OFFSET_TABLE_REGNUM
+              && ((DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))))
+        {
+          rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                                   GEN_INT (info->gp_save_offset
+                                            + sp_offset
+                                            + reg_size * i));
+          rtx mem = gen_rtx_MEM (reg_mode, addr);
+
+          set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+          emit_move_insn (gen_rtx_REG (reg_mode,
+                                       info->first_gp_reg_save + i),
+                          mem);
+        }
+
+  /* Restore fpr's if we need to do it without calling a function.  */
+  if (restoring_FPRs_inline)
+    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+      if ((regs_ever_live[info->first_fp_reg_save+i]
+           && ! call_used_regs[info->first_fp_reg_save+i]))
+        {
+          rtx addr, mem;
+          addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
+                               GEN_INT (info->fp_save_offset
+                                        + sp_offset
+                                        + 8 * i));
+          mem = gen_rtx_MEM (DFmode, addr);
+          set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+          emit_move_insn (gen_rtx_REG (DFmode,
+                                       info->first_fp_reg_save + i),
+                          mem);
+        }
+
+  /* If we saved cr, restore it here.  Just those that were used.  */
+  if (info->cr_save_p)
+    {
+      rtx r12_rtx = gen_rtx_REG (SImode, 12);
+      int count = 0;
+
+      if (using_mfcr_multiple)
+        {
+          for (i = 0; i < 8; i++)
+            if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+              count++;
+          /* cr_save_p implies at least one live call-saved CR field.  */
+          if (count == 0)
+            abort ();
+        }
+
+      if (using_mfcr_multiple && count > 1)
+        {
+          rtvec p;
+          int ndx;
+
+          p = rtvec_alloc (count);
+
+          ndx = 0;
+          for (i = 0; i < 8; i++)
+            if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+              {
+                rtvec r = rtvec_alloc (2);
+                RTVEC_ELT (r, 0) = r12_rtx;
+                /* Bit 7-i selects CR field i in the mask operand.  */
+                RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
+                RTVEC_ELT (p, ndx) =
+                  gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
+                               gen_rtx_UNSPEC (CCmode, r, 20));
+                ndx++;
+              }
+          emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
+          if (ndx != count)
+            abort ();
+        }
+      else
+        for (i = 0; i < 8; i++)
+          if (regs_ever_live[CR0_REGNO+i] && ! call_used_regs[CR0_REGNO+i])
+            {
+              emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
+                                                           CR0_REGNO+i),
+                                              r12_rtx));
+            }
+    }
+
+  /* If this is V.4, unwind the stack pointer after all of the loads
+     have been done.  We need to emit a block here so that sched
+     doesn't decide to move the sp change before the register restores
+     (which may not have any obvious dependency on the stack).  This
+     doesn't hurt performance, because there is no scheduling that can
+     be done after this point.  */
+  if (DEFAULT_ABI == ABI_V4)
+    {
+      if (frame_reg_rtx != sp_reg_rtx)
+        rs6000_emit_stack_tie ();
+
+      if (use_backchain_to_restore_sp)
+        {
+          emit_move_insn (sp_reg_rtx, frame_reg_rtx);
+        }
+      else if (sp_offset != 0)
+        {
+          emit_insn (Pmode == SImode
+                     ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx,
+                                   GEN_INT (sp_offset))
+                     : gen_adddi3 (sp_reg_rtx, sp_reg_rtx,
+                                   GEN_INT (sp_offset)));
+        }
+    }
+
+  /* Apply the dynamic stack adjustment the EH return machinery
+     computed into EH_RETURN_STACKADJ_RTX.  */
+  if (current_function_calls_eh_return)
+    {
+      rtx sa = EH_RETURN_STACKADJ_RTX;
+      emit_insn (Pmode == SImode
+                 ? gen_addsi3 (sp_reg_rtx, sp_reg_rtx, sa)
+                 : gen_adddi3 (sp_reg_rtx, sp_reg_rtx, sa));
+    }
+
+  if (!sibcall)
+    {
+      rtvec p;
+      if (! restoring_FPRs_inline)
+        p = rtvec_alloc (3 + 64 - info->first_fp_reg_save);
+      else
+        p = rtvec_alloc (2);
+
+      RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
+      RTVEC_ELT (p, 1) = gen_rtx_USE (VOIDmode,
+                                      gen_rtx_REG (Pmode,
+                                                   LINK_REGISTER_REGNUM));
+
+      /* If we have to restore more than two FP registers, branch to the
+         restore function.  It will return to our caller.  */
+      if (! restoring_FPRs_inline)
+        {
+          int i;
+          char rname[30];
+          const char *alloc_rname;
+
+          sprintf (rname, "%s%d%s", RESTORE_FP_PREFIX,
+                   info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);
+          alloc_rname = ggc_strdup (rname);
+          RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode,
+                                          gen_rtx_SYMBOL_REF (Pmode,
+                                                              alloc_rname));
+
+          /* Describe the FPR loads the out-of-line routine performs so
+             the RTL reflects them.  */
+          for (i = 0; i < 64 - info->first_fp_reg_save; i++)
+            {
+              rtx addr, mem;
+              addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
+                                   GEN_INT (info->fp_save_offset + 8*i));
+              mem = gen_rtx_MEM (DFmode, addr);
+              set_mem_alias_set (mem, rs6000_sr_alias_set);
+
+              RTVEC_ELT (p, i+3) =
+                gen_rtx_SET (VOIDmode,
+                             gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
+                             mem);
+            }
+        }
+
+      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
+    }
+}
+
+/* Write function epilogue.
+
+   FILE is the assembly stream; SIZE is unused (the layout comes from
+   rs6000_stack_info).  Emits the epilogue insns textually when the
+   backend has no epilogue expander, then on AIX emits the traceback
+   table.  */
+
+static void
+rs6000_output_function_epilogue (file, size)
+     FILE *file;
+     HOST_WIDE_INT size ATTRIBUTE_UNUSED;
+{
+  rs6000_stack_t *info = rs6000_stack_info ();
+  /* Omit the optional traceback-table fields when optimizing for size
+     or targeting ELF.  */
+  int optional_tbtab = (optimize_size || TARGET_ELF) ? 0 : 1;
+
+  if (! HAVE_epilogue)
+    {
+      rtx insn = get_last_insn ();
+      /* If the last insn was a BARRIER, we don't have to write anything except
+         the trace table.  */
+      if (GET_CODE (insn) == NOTE)
+        insn = prev_nonnote_insn (insn);
+      if (insn == 0 || GET_CODE (insn) != BARRIER)
+        {
+          /* This is slightly ugly, but at least we don't have two
+             copies of the epilogue-emitting code.  */
+          start_sequence ();
+
+          /* A NOTE_INSN_DELETED is supposed to be at the start
+             and end of the "toplevel" insn chain.  */
+          emit_note (0, NOTE_INSN_DELETED);
+          rs6000_emit_epilogue (FALSE);
+          emit_note (0, NOTE_INSN_DELETED);
+
+          if (TARGET_DEBUG_STACK)
+            debug_rtx_list (get_insns (), 100);
+          final (get_insns (), file, FALSE, FALSE);
+          end_sequence ();
+        }
+    }
+
+  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
+     on its format.
+
+     We don't output a traceback table if -finhibit-size-directive was
+     used.  The documentation for -finhibit-size-directive reads
+     ``don't output a @code{.size} assembler directive, or anything
+     else that would cause trouble if the function is split in the
+     middle, and the two halves are placed at locations far apart in
+     memory.''  The traceback table has this property, since it
+     includes the offset from the start of the function to the
+     traceback table itself.
+
+     System V.4 Powerpc's (and the embedded ABI derived from it) use a
+     different traceback table.  */
+  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive)
+    {
+      const char *fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+      const char *language_string = lang_hooks.name;
+      int fixed_parms = 0, float_parms = 0, parm_info = 0;
+      int i;
+
+      while (*fname == '.')     /* V.4 encodes . in the name */
+        fname++;
+
+      /* Need label immediately before tbtab, so we can compute its offset
+         from the function start.  */
+      if (*fname == '*')
+        ++fname;
+      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+      ASM_OUTPUT_LABEL (file, fname);
+
+      /* The .tbtab pseudo-op can only be used for the first eight
+         expressions, since it can't handle the possibly variable
+         length fields that follow.  However, if you omit the optional
+         fields, the assembler outputs zeros for all optional fields
+         anyway, giving each variable-length field its minimum length
+         (as defined in sys/debug.h).  Thus we can not use the .tbtab
+         pseudo-op at all.  */
+
+      /* An all-zero word flags the start of the tbtab, for debuggers
+         that have to find it by searching forward from the entry
+         point or from the current pc.  */
+      fputs ("\t.long 0\n", file);
+
+      /* Tbtab format type.  Use format type 0.  */
+      fputs ("\t.byte 0,", file);
+
+      /* Language type.  Unfortunately, there doesn't seem to be any
+         official way to get this info, so we use language_string.  C
+         is 0.  C++ is 9.  No number defined for Obj-C, so use the
+         value for C for now.  There is no official value for Java,
+         although IBM appears to be using 13.  There is no official value
+         for Chill, so we've chosen 44 pseudo-randomly.  */
+      if (! strcmp (language_string, "GNU C")
+          || ! strcmp (language_string, "GNU Objective-C"))
+        i = 0;
+      else if (! strcmp (language_string, "GNU F77"))
+        i = 1;
+      else if (! strcmp (language_string, "GNU Ada"))
+        i = 3;
+      else if (! strcmp (language_string, "GNU Pascal"))
+        i = 2;
+      else if (! strcmp (language_string, "GNU C++"))
+        i = 9;
+      else if (! strcmp (language_string, "GNU Java"))
+        i = 13;
+      else if (! strcmp (language_string, "GNU CHILL"))
+        i = 44;
+      else
+        abort ();
+      fprintf (file, "%d,", i);
+
+      /* 8 single bit fields: global linkage (not set for C extern linkage,
+         apparently a PL/I convention?), out-of-line epilogue/prologue, offset
+         from start of procedure stored in tbtab, internal function, function
+         has controlled storage, function has no toc, function uses fp,
+         function logs/aborts fp operations.  */
+      /* Assume that fp operations are used if any fp reg must be saved.  */
+      fprintf (file, "%d,",
+               (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
+
+      /* 6 bitfields: function is interrupt handler, name present in
+         proc table, function calls alloca, on condition directives
+         (controls stack walks, 3 bits), saves condition reg, saves
+         link reg.  */
+      /* The `function calls alloca' bit seems to be set whenever reg 31 is
+         set up as a frame pointer, even when there is no alloca call.  */
+      fprintf (file, "%d,",
+               ((optional_tbtab << 6)
+                | ((optional_tbtab & frame_pointer_needed) << 5)
+                | (info->cr_save_p << 1)
+                | (info->lr_save_p)));
+
+      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
+         (6 bits).  */
+      fprintf (file, "%d,",
+               (info->push_p << 7) | (64 - info->first_fp_reg_save));
+
+      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
+      fprintf (file, "%d,", (32 - first_reg_to_save ()));
+
+      if (optional_tbtab)
+        {
+          /* Compute the parameter info from the function decl argument
+             list.  */
+          tree decl;
+          int next_parm_info_bit = 31;
+
+          for (decl = DECL_ARGUMENTS (current_function_decl);
+               decl; decl = TREE_CHAIN (decl))
+            {
+              rtx parameter = DECL_INCOMING_RTL (decl);
+              enum machine_mode mode = GET_MODE (parameter);
+
+              /* Only register parameters are encoded; stack parameters
+                 are skipped (see the format comment further down).  */
+              if (GET_CODE (parameter) == REG)
+                {
+                  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+                    {
+                      int bits;
+
+                      float_parms++;
+
+                      if (mode == SFmode)
+                        bits = 0x2;
+                      else if (mode == DFmode)
+                        bits = 0x3;
+                      else
+                        abort ();
+
+                      /* If only one bit will fit, don't or in this entry.  */
+                      if (next_parm_info_bit > 0)
+                        parm_info |= (bits << (next_parm_info_bit - 1));
+                      next_parm_info_bit -= 2;
+                    }
+                  else
+                    {
+                      fixed_parms += ((GET_MODE_SIZE (mode)
+                                       + (UNITS_PER_WORD - 1))
+                                      / UNITS_PER_WORD);
+                      next_parm_info_bit -= 1;
+                    }
+                }
+            }
+        }
+
+      /* Number of fixed point parameters.  */
+      /* This is actually the number of words of fixed point parameters; thus
+         an 8 byte struct counts as 2; and thus the maximum value is 8.  */
+      fprintf (file, "%d,", fixed_parms);
+
+      /* 2 bitfields: number of floating point parameters (7 bits), parameters
+         all on stack.  */
+      /* This is actually the number of fp registers that hold parameters;
+         and thus the maximum value is 13.  */
+      /* Set parameters on stack bit if parameters are not in their original
+         registers, regardless of whether they are on the stack?  Xlc
+         seems to set the bit when not optimizing.  */
+      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
+
+      if (! optional_tbtab)
+        return;
+
+      /* Optional fields follow.  Some are variable length.  */
+
+      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
+         11 double float.  */
+      /* There is an entry for each parameter in a register, in the order that
+         they occur in the parameter list.  Any intervening arguments on the
+         stack are ignored.  If the list overflows a long (max possible length
+         34 bits) then completely leave off all elements that don't fit.  */
+      /* Only emit this long if there was at least one parameter.  */
+      if (fixed_parms || float_parms)
+        fprintf (file, "\t.long %d\n", parm_info);
+
+      /* Offset from start of code to tb table.  */
+      fputs ("\t.long ", file);
+      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
+#if TARGET_AIX
+      RS6000_OUTPUT_BASENAME (file, fname);
+#else
+      assemble_name (file, fname);
+#endif
+      fputs ("-.", file);
+#if TARGET_AIX
+      RS6000_OUTPUT_BASENAME (file, fname);
+#else
+      assemble_name (file, fname);
+#endif
+      putc ('\n', file);
+
+      /* Interrupt handler mask.  */
+      /* Omit this long, since we never set the interrupt handler bit
+         above.  */
+
+      /* Number of CTL (controlled storage) anchors.  */
+      /* Omit this long, since the has_ctl bit is never set above.  */
+
+      /* Displacement into stack of each CTL anchor.  */
+      /* Omit this list of longs, because there are no CTL anchors.  */
+
+      /* Length of function name.  */
+      fprintf (file, "\t.short %d\n", (int) strlen (fname));
+
+      /* Function name.  */
+      assemble_string (fname, strlen (fname));
+
+      /* Register for alloca automatic storage; this is always reg 31.
+         Only emit this if the alloca bit was set above.  */
+      if (frame_pointer_needed)
+        fputs ("\t.byte 31\n", file);
+    }
+  return;
+}
+
+/* A C compound statement that outputs the assembler code for a thunk
+   function, used to implement C++ virtual function calls with
+   multiple inheritance.  The thunk acts as a wrapper around a virtual
+   function, adjusting the implicit object parameter before handing
+   control off to the real function.
+
+   First, emit code to add the integer DELTA to the location that
+   contains the incoming first argument.  Assume that this argument
+   contains a pointer, and is the one used to pass the `this' pointer
+   in C++.  This is the incoming argument *before* the function
+   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
+   values of all other incoming arguments.
+
+   After the addition, emit code to jump to FUNCTION, which is a
+   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
+   not touch the return address.  Hence returning from FUNCTION will
+   return to whoever called the current `thunk'.
+
+   The effect must be as if FUNCTION had been called directly with the
+   adjusted first argument.  This macro is responsible for emitting
+   all of the code for a thunk function; output_function_prologue()
+   and output_function_epilogue() are not invoked.
+
+   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
+   been extracted from it.)  It might possibly be useful on some
+   targets, but probably not.
+
+   If you do not define this macro, the target-independent code in the
+   C++ frontend will generate a less efficient heavyweight thunk that
+   calls FUNCTION instead of jumping to it.  The generic approach does
+   not support varargs.  */
+
+void
+output_mi_thunk (file, thunk_fndecl, delta, function)
+     FILE *file;
+     tree thunk_fndecl ATTRIBUTE_UNUSED;
+     int delta;
+     tree function;
+{
+  /* `this' arrives in r3, or in r4 when the return value is passed by
+     invisible reference (aggregate_value_p).  */
+  const char *this_reg =
+    reg_names[ aggregate_value_p (TREE_TYPE (TREE_TYPE (function))) ? 4 : 3 ];
+  const char *prefix;
+  const char *fname;
+  const char *r0 = reg_names[0];
+  const char *toc = reg_names[2];
+  const char *schain = reg_names[11];
+  const char *r12 = reg_names[12];
+  char buf[512];
+  static int labelno = 0;
+
+  /* Small constants that can be done by one add instruction.  */
+  if (delta >= -32768 && delta <= 32767)
+    {
+      if (! TARGET_NEW_MNEMONICS)
+        fprintf (file, "\tcal %s,%d(%s)\n", this_reg, delta, this_reg);
+      else
+        fprintf (file, "\taddi %s,%s,%d\n", this_reg, this_reg, delta);
+    }
+
+  /* Large constants that can be done by one addis instruction.  */
+  else if ((delta & 0xffff) == 0 && num_insns_constant_wide (delta) == 1)
+    asm_fprintf (file, "\t{cau|addis} %s,%s,%d\n", this_reg, this_reg,
+                 delta >> 16);
+
+  /* 32-bit constants that can be done by an add and addis instruction.  */
+  else if (TARGET_32BIT || num_insns_constant_wide (delta) == 1)
+    {
+      /* Break into two pieces, propagating the sign bit from the low
+         word to the upper word.  */
+      int delta_high = delta >> 16;
+      int delta_low  = delta & 0xffff;
+      if ((delta_low & 0x8000) != 0)
+        {
+          delta_high++;
+          delta_low = (delta_low ^ 0x8000) - 0x8000;    /* sign extend */
+        }
+
+      asm_fprintf (file, "\t{cau|addis} %s,%s,%d\n", this_reg, this_reg,
+                   delta_high);
+
+      if (! TARGET_NEW_MNEMONICS)
+        fprintf (file, "\tcal %s,%d(%s)\n", this_reg, delta_low, this_reg);
+      else
+        fprintf (file, "\taddi %s,%s,%d\n", this_reg, this_reg, delta_low);
+    }
+
+  /* 64-bit constants, fixme */
+  else
+    abort ();
+
+  /* Get the prefix in front of the names.  */
+  switch (DEFAULT_ABI)
+    {
+    default:
+      abort ();
+
+    case ABI_AIX:
+      prefix = ".";
+      break;
+
+    case ABI_V4:
+    case ABI_AIX_NODESC:
+      prefix = "";
+      break;
+    }
+
+  /* If the function is compiled in this module, jump to it directly.
+     Otherwise, load up its address and jump to it.  */
+
+  fname = XSTR (XEXP (DECL_RTL (function), 0), 0);
+
+  if (current_file_function_operand (XEXP (DECL_RTL (function), 0), VOIDmode)
+      && ! lookup_attribute ("longcall",
+                             TYPE_ATTRIBUTES (TREE_TYPE (function))))
+    {
+      fprintf (file, "\tb %s", prefix);
+      assemble_name (file, fname);
+      if (DEFAULT_ABI == ABI_V4 && flag_pic) fputs ("@local", file);
+      putc ('\n', file);
+    }
+
+  else
+    {
+      switch (DEFAULT_ABI)
+        {
+        default:
+          abort ();
+
+        case ABI_AIX:
+          /* Set up a TOC entry for the function.  */
+          ASM_GENERATE_INTERNAL_LABEL (buf, "Lthunk", labelno);
+          toc_section ();
+          ASM_OUTPUT_INTERNAL_LABEL (file, "Lthunk", labelno);
+          labelno++;
+
+          if (TARGET_MINIMAL_TOC)
+            fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
+          else
+            {
+              fputs ("\t.tc ", file);
+              assemble_name (file, fname);
+              fputs ("[TC],", file);
+            }
+          assemble_name (file, fname);
+          putc ('\n', file);
+          text_section ();
+          if (TARGET_MINIMAL_TOC)
+            asm_fprintf (file, (TARGET_32BIT)
+                         ? "\t{l|lwz} %s,%s(%s)\n" : "\tld %s,%s(%s)\n", r12,
+                         TARGET_ELF ? ".LCTOC0@toc" : ".LCTOC..1", toc);
+          asm_fprintf (file, (TARGET_32BIT) ? "\t{l|lwz} %s," : "\tld %s,", r12);
+          assemble_name (file, buf);
+          if (TARGET_ELF && TARGET_MINIMAL_TOC)
+            fputs ("-(.LCTOC1)", file);
+          asm_fprintf (file, "(%s)\n", TARGET_MINIMAL_TOC ? r12 : toc);
+          /* r12 now points at the function descriptor: load the entry
+             address into r0, the new TOC into r2 and the static chain
+             into r11, then jump through CTR.  */
+          asm_fprintf (file,
+                       (TARGET_32BIT) ? "\t{l|lwz} %s,0(%s)\n" : "\tld %s,0(%s)\n",
+                       r0, r12);
+
+          asm_fprintf (file,
+                       (TARGET_32BIT) ? "\t{l|lwz} %s,4(%s)\n" : "\tld %s,8(%s)\n",
+                       toc, r12);
+
+          asm_fprintf (file, "\tmtctr %s\n", r0);
+          asm_fprintf (file,
+                       (TARGET_32BIT) ? "\t{l|lwz} %s,8(%s)\n" : "\tld %s,16(%s)\n",
+                       schain, r12);
+
+          asm_fprintf (file, "\tbctr\n");
+          break;
+
+        case ABI_AIX_NODESC:
+        case ABI_V4:
+          fprintf (file, "\tb %s", prefix);
+          assemble_name (file, fname);
+          if (flag_pic) fputs ("@plt", file);
+          putc ('\n', file);
+          break;
+
+#if TARGET_MACHO
+        case ABI_DARWIN:
+          fprintf (file, "\tb %s", prefix);
+          /* For PIC calls to symbols not yet defined locally, go
+             through the Mach-O stub.  */
+          if (flag_pic && !machopic_name_defined_p (fname))
+            assemble_name (file, machopic_stub_name (fname));
+          else
+            assemble_name (file, fname);
+          putc ('\n', file);
+          break;
+#endif
+        }
+    }
+}
+
+
+/* A quick summary of the various types of 'constant-pool tables'
+   under PowerPC:
+
+   Target	Flags		Name		One table per
+   AIX		(none)		AIX TOC		object file
+   AIX		-mfull-toc	AIX TOC		object file
+   AIX		-mminimal-toc	AIX minimal TOC	translation unit
+   SVR4/EABI	(none)		SVR4 SDATA	object file
+   SVR4/EABI	-fpic		SVR4 pic	object file
+   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
+   SVR4/EABI	-mrelocatable	EABI TOC	function
+   SVR4/EABI	-maix		AIX TOC		object file
+   SVR4/EABI	-maix -mminimal-toc
+				AIX minimal TOC	translation unit
+
+   Name			Reg.	Set by	entries	contains:
+				made by	addrs?	fp?	sum?
+
+   AIX TOC		2	crt0	as	Y	option	option
+   AIX minimal TOC	30	prolog	gcc	Y	Y	option
+   SVR4 SDATA		13	crt0	gcc	N	Y	N
+   SVR4 pic		30	prolog	ld	Y	not yet	N
+   SVR4 PIC		30	prolog	gcc	Y	option	option
+   EABI TOC		30	prolog	gcc	Y	option	option
+
+*/
+
+/* Hash table stuff for keeping track of TOC entries.  */
+
+/* One entry per distinct constant placed in the TOC.  */
+struct toc_hash_struct
+{
+  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
+     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
+  rtx key;
+  /* Mode the constant is used in; part of the hash and equality tests
+     (see toc_hash_function and toc_hash_eq).  */
+  enum machine_mode key_mode;
+  /* NOTE(review): presumably the internal label number of the emitted
+     TOC entry -- the emitting code is outside this chunk; confirm.  */
+  int labelno;
+};
+
+/* Table of all TOC entries emitted so far, keyed on (key, key_mode).  */
+static htab_t toc_hash_table;
+
+/* Hash functions for the hash table.  */
+
+/* Compute a hash code for rtx K by folding in its code, mode, and
+   every operand (recursing into sub-rtxes).  K is a constant suitable
+   for a TOC entry.  */
+
+static unsigned
+rs6000_hash_constant (k)
+     rtx k;
+{
+  unsigned result = (GET_CODE (k) << 3) ^ GET_MODE (k);
+  const char *format = GET_RTX_FORMAT (GET_CODE (k));
+  int flen = strlen (format);
+  int fidx;
+
+  /* LABEL_REF: hash on integer field 3 of the referenced CODE_LABEL
+     (X0INT is the unchecked accessor).  NOTE(review): presumably that
+     field is the label number -- confirm against rtl.def.  */
+  if (GET_CODE (k) == LABEL_REF)
+    return result * 1231 + X0INT (XEXP (k, 0), 3);
+
+  /* Skip leading operands that chain rtxes rather than identify the
+     constant: field 0 of a CONST_DOUBLE and fields 0-2 of a CODE_LABEL
+     are not part of its identity.  */
+  if (GET_CODE (k) == CONST_DOUBLE)
+    fidx = 1;
+  else if (GET_CODE (k) == CODE_LABEL)
+    fidx = 3;
+  else
+    fidx = 0;
+
+  for (; fidx < flen; fidx++)
+    switch (format[fidx])
+      {
+      case 's':
+        {
+          unsigned i, len;
+          const char *str = XSTR (k, fidx);
+          len = strlen (str);
+          result = result * 613 + len;
+          for (i = 0; i < len; i++)
+            result = result * 613 + (unsigned) str[i];
+          break;
+        }
+      case 'u':
+      case 'e':
+        result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
+        break;
+      case 'i':
+      case 'n':
+        result = result * 613 + (unsigned) XINT (k, fidx);
+        break;
+      case 'w':
+        /* HOST_WIDE_INT may be wider than unsigned; fold it in one
+           unsigned-sized chunk at a time in that case.  */
+        if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
+          result = result * 613 + (unsigned) XWINT (k, fidx);
+        else
+          {
+            size_t i;
+            for (i = 0; i < sizeof(HOST_WIDE_INT)/sizeof(unsigned); i++)
+              result = result * 613 + (unsigned) (XWINT (k, fidx)
+                                                  >> CHAR_BIT * i);
+          }
+        break;
+      default:
+        abort ();
+      }
+  return result;
+}
+
+static unsigned
+toc_hash_function (hash_entry)
+ const void * hash_entry;
+{
+ const struct toc_hash_struct *thc =
+ (const struct toc_hash_struct *) hash_entry;
+ return rs6000_hash_constant (thc->key) ^ thc->key_mode;
+}
+
/* Compare H1 and H2 (both struct toc_hash_struct *) for equivalence.
   Returns nonzero when they denote the same TOC constant.  */

static int
toc_hash_eq (h1, h2)
     const void * h1;
     const void * h2;
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  /* Different modes can never be equal.  */
  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  /* Gotcha:  One of these const_doubles will be in memory.
     The other may be on the constant-pool chain.
     So rtx_equal_p will think they are different...  */
  if (r1 == r2)
    return 1;
  if (GET_CODE (r1) != GET_CODE (r2)
      || GET_MODE (r1) != GET_MODE (r2))
    return 0;
  /* Compare CONST_DOUBLEs field by field, skipping the chain slot at
     index 0 (it differs between the two copies, see above).  */
  if (GET_CODE (r1) == CONST_DOUBLE)
    {
      int format_len = strlen (GET_RTX_FORMAT (CONST_DOUBLE));
      int i;
      for (i = 1; i < format_len; i++)
	if (XWINT (r1, i) != XWINT (r2, i))
	  return 0;

      return 1;
    }
  /* LABEL_REFs compare by label number, matching
     rs6000_hash_constant.  */
  else if (GET_CODE (r1) == LABEL_REF)
    return (CODE_LABEL_NUMBER (XEXP (r1, 0))
	    == CODE_LABEL_NUMBER (XEXP (r2, 0)));
  else
    return rtx_equal_p (r1, r2);
}
+
/* Mark the hash table-entry HASH_ENTRY as live for the garbage
   collector.  Callback for htab_traverse; UNUSED is ignored.  */

static int
toc_hash_mark_entry (hash_slot, unused)
     void ** hash_slot;
     void * unused ATTRIBUTE_UNUSED;
{
  const struct toc_hash_struct * hash_entry =
    *(const struct toc_hash_struct **) hash_slot;
  rtx r = hash_entry->key;
  ggc_set_mark (hash_entry);
  /* For CODE_LABELS, we don't want to drag in the whole insn chain...  */
  if (GET_CODE (r) == LABEL_REF)
    {
      /* Mark only the LABEL_REF and its CODE_LABEL, not the insns
	 the label is chained to.  */
      ggc_set_mark (r);
      ggc_set_mark (XEXP (r, 0));
    }
  else
    ggc_mark_rtx (r);
  /* Nonzero keeps htab_traverse walking the rest of the table.  */
  return 1;
}
+
/* Mark all the elements of the TOC hash-table *HT.  VHT is really an
   htab_t *; registered as the marking routine for the toc_hash_table
   GC root in rs6000_add_gc_roots.  */

static void
toc_hash_mark_table (vht)
     void *vht;
{
  htab_t *ht = vht;

  htab_traverse (*ht, toc_hash_mark_entry, (void *)0);
}
+
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.

   NAME is a symbol name string.  Fixed: the expansion previously
   referenced a local variable `name' rather than the macro parameter,
   which silently required every expansion site to have a variable of
   exactly that name in scope.  */

#define VTABLE_NAME_P(NAME) \
  (strncmp ("_vt.", (NAME), strlen("_vt.")) == 0	  \
  || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0	  \
  || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0	  \
  || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
+
/* Output the name of SYMBOL_REF X to FILE.  */

void
rs6000_output_symbol_ref (file, x)
     FILE *file;
     rtx x;
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
+
/* Output a TOC entry.  We derive the entry name from what is being
   written.  FILE receives the assembly text; X is the constant (or
   symbol) going into the TOC; LABELNO numbers its LC.. label; MODE is
   X's machine mode.  */

void
output_toc (file, x, labelno, mode)
     FILE *file;
     rtx x;
     int labelno;
     enum machine_mode mode;
{
  char buf[256];
  const char *name = buf;
  const char *real_name;
  rtx base = x;
  int offset = 0;

  if (TARGET_NO_TOC)
    abort ();

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  */
  if (TARGET_TOC)
    {
      struct toc_hash_struct *h;
      void * * found;

      h = ggc_alloc (sizeof (*h));
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, 1);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  ASM_OUTPUT_INTERNAL_LABEL (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  /* One 64-bit TOC slot holds the whole double.  */
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],", k[0], k[1]);
	  fprintf (file, "0x%lx%08lx\n", k[0], k[1]);
	  return;
	}
      else
	{
	  /* Two 32-bit words.  */
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],", k[0], k[1]);
	  fprintf (file, "0x%lx,0x%lx\n", k[0], k[1]);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  /* The single occupies the high half of the 64-bit slot.  */
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l);
	  fprintf (file, "0x%lx00000000\n", l);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l);
	  fprintf (file, "0x%lx\n", l);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      /* Split the integer constant into LOW and HIGH halves.  */
      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  /* Sign-extend into HIGH by hand.  */
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      if (TARGET_64BIT && POINTER_SIZE < GET_MODE_BITSIZE (mode))
	abort ();/* It would be easy to make this work, but it doesn't now.  */

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
		       POINTER_SIZE, &low, &high, 0);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],", (long)high, (long)low);
	  fprintf (file, "0x%lx%08lx\n", (long) high, (long) low);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      /* Constant wider than a pointer: two words.  */
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long)high, (long)low);
	      fprintf (file, "0x%lx,0x%lx\n", (long) high, (long) low);
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low);
	      fprintf (file, "0x%lx\n", (long) low);
	    }
	  return;
	}
    }

  /* Symbolic constant: split CONST (PLUS base offset) into BASE and
     OFFSET.  */
  if (GET_CODE (x) == CONST)
    {
      if (GET_CODE (XEXP (x, 0)) != PLUS)
	abort ();

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  if (GET_CODE (base) == SYMBOL_REF)
    name = XSTR (base, 0);
  else if (GET_CODE (base) == LABEL_REF)
    ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (XEXP (base, 0)));
  else if (GET_CODE (base) == CODE_LABEL)
    ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
  else
    abort ();

  STRIP_NAME_ENCODING (real_name, name);
  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fprintf (file, "\t.tc %s", real_name);

      /* Encode the offset into the entry name: .N for negative,
	 .P for positive, nothing for zero.  */
      if (offset < 0)
	fprintf (file, ".N%d", - offset);
      else if (offset)
	fprintf (file, ".P%d", offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, "%d", offset);
      else if (offset > 0)
	fprintf (file, "+%d", offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
+
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (file, p, n)
     FILE *file;
     const char *p;
     int n;
{
  const char *open_string = "\t.byte \"";
  const char *open_decimal = "\t.byte ";
  const char *pending_close = NULL;
  int in_string_len = 0;
  int idx;

  for (idx = 0; idx < n; idx++)
    {
      char ch = p[idx];

      if (ch >= ' ' && ch < 0177)
	{
	  /* Printable character: open a quoted string if we are not
	     already inside one.  */
	  if (open_string)
	    fputs (open_string, file);
	  putc (ch, file);

	  /* Write two quotes to get one.  */
	  if (ch == '"')
	    {
	      putc (ch, file);
	      ++in_string_len;
	    }

	  open_string = NULL;
	  open_decimal = "\"\n\t.byte ";
	  pending_close = "\"\n";
	  ++in_string_len;

	  /* Break up strings well before the assembler's limit.  */
	  if (in_string_len >= 512)
	    {
	      fputs (pending_close, file);

	      open_string = "\t.byte \"";
	      open_decimal = "\t.byte ";
	      pending_close = NULL;
	      in_string_len = 0;
	    }
	}
      else
	{
	  /* Unprintable character: close any open string and emit the
	     byte as a decimal value.  */
	  if (open_decimal)
	    fputs (open_decimal, file);
	  fprintf (file, "%d", ch);

	  open_string = "\n\t.byte \"";
	  open_decimal = ", ";
	  pending_close = "\n";
	  in_string_len = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (pending_close)
    fputs (pending_close, file);
}
+
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  Non-alphanumeric characters in the basename are dropped.  */

void
rs6000_gen_section_name (buf, filename, section_desc)
     char **buf;
     const char *filename;
     const char *section_desc;
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  /* Find the basename and the position of its last period.  */
  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  /* '_' + basename + SECTION_DESC + NUL; permalloc'd storage lives
     for the whole compilation.  */
  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) permalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  /* Replace the final period with SECTION_DESC.  */
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  /* No period at all: append SECTION_DESC; otherwise terminate.  */
  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
+
/* Emit profile function.  LABELNO numbers the current function's LP..
   profile label.  The mcount call is emitted here (as RTL) only for
   the AIX and Darwin ABIs; the V.4 ABIs emit it textually in
   output_function_profiler instead.  */

void
output_profile_hook (labelno)
     int labelno;
{
  if (DEFAULT_ABI == ABI_AIX)
    {
      char buf[30];
      const char *label_name;
      rtx fun;

      labelno += 1;

      /* Pass the address of the LP label as mcount's argument.  */
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      STRIP_NAME_ENCODING (label_name, ggc_strdup (buf));
      fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

      emit_library_call (init_one_libfunc (RS6000_MCOUNT), 0, VOIDmode, 1,
			 fun, Pmode);
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LINK_REGISTER_REGNUM;

      /* Be conservative and always set this, at least for now.  */
      current_function_uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (flag_pic)
	{
	  mcount_name = machopic_stub_name (mcount_name);
	  if (current_function_uses_pic_offset_table)
	    caller_addr_regno = 0;
	}
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 0, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
+
/* Write function profiler code to FILE.  LABELNO numbers the LP..
   label whose address is handed to mcount.  Only the V.4-style ABIs
   emit anything here; AIX and Darwin do it in output_profile_hook.  */

void
output_function_profiler (file, labelno)
  FILE *file;
  int labelno;
{
  char buf[100];

  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
  switch (DEFAULT_ABI)
    {
    default:
      abort ();

    case ABI_V4:
    case ABI_AIX_NODESC:
      /* Save LR, load the LP label's address into r0 (strategy
	 depends on the PIC level), then call mcount.  */
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (flag_pic == 1)
	{
	  /* -fpic: fetch the label address from the GOT.  */
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  /* -fPIC: compute the address pc-relatively through an
	     inline literal word.  */
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  fputs ("\tbl 1f\n\t.long ", file);
	  assemble_name (file, buf);
	  fputs ("-.\n1:", file);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	  asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  /* Non-PIC: absolute lis/la address load.  */
	  asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* Preserve the static chain (in r30) across the mcount call.  */
      if (current_function_needs_context)
	asm_fprintf (file, "\tmr %s,%s\n",
		     reg_names[30], reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
      if (current_function_needs_context)
	asm_fprintf (file, "\tmr %s,%s\n",
		     reg_names[STATIC_CHAIN_REGNUM], reg_names[30]);
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      /* Don't do anything, done in output_profile_hook ().  */
      break;

    }
}
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
+
+static int
+rs6000_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn ATTRIBUTE_UNUSED;
+ int cost;
+{
+ if (! recog_memoized (insn))
+ return 0;
+
+ if (REG_NOTE_KIND (link) != 0)
+ return 0;
+
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ /* Data dependency; DEP_INSN writes a register that INSN reads
+ some cycles later. */
+ switch (get_attr_type (insn))
+ {
+ case TYPE_JMPREG:
+ /* Tell the first scheduling pass about the latency between
+ a mtctr and bctr (and mtlr and br/blr). The first
+ scheduling pass will not know about this latency since
+ the mtctr instruction, which has the latency associated
+ to it, will be generated by reload. */
+ return TARGET_POWER ? 5 : 4;
+ case TYPE_BRANCH:
+ /* Leave some extra cycles between a compare and its
+ dependent branch, to inhibit expensive mispredicts. */
+ if ((rs6000_cpu_attr == CPU_PPC750
+ || rs6000_cpu_attr == CPU_PPC7400
+ || rs6000_cpu_attr == CPU_PPC7450)
+ && recog_memoized (dep_insn)
+ && (INSN_CODE (dep_insn) >= 0)
+ && (get_attr_type (dep_insn) == TYPE_COMPARE
+ || get_attr_type (dep_insn) == TYPE_DELAYED_COMPARE
+ || get_attr_type (dep_insn) == TYPE_FPCOMPARE
+ || get_attr_type (dep_insn) == TYPE_CR_LOGICAL))
+ return cost + 2;
+ default:
+ break;
+ }
+ /* Fall out to return default cost. */
+ }
+
+ return cost;
+}
+
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Reduce the priority to execute the
   INSN earlier, increase the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (insn, priority)
     rtx insn ATTRIBUTE_UNUSED;
     int priority;
{
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

  /* The adjustment below is compiled out; as written, this hook
     currently returns PRIORITY unchanged.  */
#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_IMUL:
      case TYPE_IDIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  return priority;
}
+
+/* Return how many instructions the machine can issue per cycle. */
+
+static int
+rs6000_issue_rate ()
+{
+ switch (rs6000_cpu_attr) {
+ case CPU_RIOS1: /* ? */
+ case CPU_RS64A:
+ case CPU_PPC601: /* ? */
+ case CPU_PPC7450:
+ return 3;
+ case CPU_PPC603:
+ case CPU_PPC750:
+ case CPU_PPC7400:
+ return 2;
+ case CPU_RIOS2:
+ case CPU_PPC604:
+ case CPU_PPC604E:
+ case CPU_PPC620:
+ case CPU_PPC630:
+ return 4;
+ default:
+ return 1;
+ }
+}
+
+
+/* Length in units of the trampoline for entering a nested function. */
+
+int
+rs6000_trampoline_size ()
+{
+ int ret = 0;
+
+ switch (DEFAULT_ABI)
+ {
+ default:
+ abort ();
+
+ case ABI_AIX:
+ ret = (TARGET_32BIT) ? 12 : 24;
+ break;
+
+ case ABI_DARWIN:
+ case ABI_V4:
+ case ABI_AIX_NODESC:
+ ret = (TARGET_32BIT) ? 40 : 48;
+ break;
+ }
+
+ return ret;
+}
+
/* Emit RTL insns to initialize the variable parts of a trampoline.
   ADDR is the address of the trampoline storage.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

void
rs6000_initialize_trampoline (addr, fnaddr, cxt)
     rtx addr;
     rtx fnaddr;
     rtx cxt;
{
  enum machine_mode pmode = Pmode;
  int regsize = (TARGET_32BIT) ? 4 : 8;	/* Bytes per pointer slot.  */
  rtx ctx_reg = force_reg (pmode, cxt);

  switch (DEFAULT_ABI)
    {
    default:
      abort ();

/* Macros to shorten the code expansions below.  */
#define MEM_DEREF(addr) gen_rtx_MEM (pmode, memory_address (pmode, addr))
#define MEM_PLUS(addr,offset) \
  gen_rtx_MEM (pmode, memory_address (pmode, plus_constant (addr, offset)))

    /* Under AIX, just build the 3 word function descriptor */
    case ABI_AIX:
      {
	rtx fn_reg = gen_reg_rtx (pmode);
	rtx toc_reg = gen_reg_rtx (pmode);
	/* Copy the code address and TOC pointer out of FNADDR's
	   descriptor, then store them plus the static chain into the
	   descriptor at ADDR.  */
	emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
	emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
	emit_move_insn (MEM_DEREF (addr), fn_reg);
	emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
	emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
    case ABI_AIX_NODESC:
      emit_library_call (gen_rtx_SYMBOL_REF (SImode, "__trampoline_setup"),
			 FALSE, VOIDmode, 4,
			 addr, pmode,
			 GEN_INT (rs6000_trampoline_size ()), SImode,
			 fnaddr, pmode,
			 ctx_reg, pmode);
      break;
    }

  return;
}
+
+
/* Table of valid machine attributes.  Only "longcall" (forcing an
   indirect long-branch call sequence) is recognized; the NULL name
   terminates the table.  */

const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "longcall", 0, 0, false, true, true, rs6000_handle_longcall_attribute },
  { NULL, 0, 0, false, false, false, NULL }
};
+
/* Handle a "longcall" attribute; arguments as in struct
   attribute_spec.handler.  Warns and suppresses the attribute when it
   is applied to something other than a function type, a FIELD_DECL,
   or a TYPE_DECL.  */

static tree
rs6000_handle_longcall_attribute (node, name, args, flags, no_add_attrs)
     tree *node;
     tree name;
     tree args ATTRIBUTE_UNUSED;
     int flags ATTRIBUTE_UNUSED;
     bool *no_add_attrs;
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning ("`%s' attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      /* Tell decl_attributes not to attach the attribute.  */
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
+
/* Return a reference suitable for calling a function with the
   longcall attribute.  For a SYMBOL_REF, strip any leading System V
   `.' prefix and force the address into a register; anything else is
   returned unchanged.  */

struct rtx_def *
rs6000_longcall_ref (call_ref)
     rtx call_ref;
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
	call_name++;

      /* Intern the stripped name so its string storage stays live.  */
      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
+
+
+/* A C statement or statements to switch to the appropriate section
+ for output of RTX in mode MODE. You can assume that RTX is some
+ kind of constant in RTL. The argument MODE is redundant except in
+ the case of a `const_int' rtx. Select the section by calling
+ `text_section' or one of the alternatives for other sections.
+
+ Do not define this macro if you put all constants in the read-only
+ data section. */
+
+#ifdef USING_ELFOS_H
+
+void
+rs6000_select_rtx_section (mode, x)
+ enum machine_mode mode;
+ rtx x;
+{
+ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
+ toc_section ();
+ else
+ const_section ();
+}
+
/* A C statement or statements to switch to the appropriate
   section for output of DECL.  DECL is either a `VAR_DECL' node
   or a constant of some sort.  RELOC indicates whether forming
   the initial value of DECL requires link-time relocations.  */

void
rs6000_select_section (decl, reloc)
     tree decl;
     int reloc;
{
  int size = int_size_in_bytes (TREE_TYPE (decl));
  int needs_sdata;
  int readonly;
  /* Dispatch table indexed by (readonly ? 0 : 2) + (sdata ? 1 : 0):
     const, sdata2, data, sdata.  */
  static void (* const sec_funcs[4]) PARAMS ((void)) = {
    &const_section,
    &sdata2_section,
    &data_section,
    &sdata_section
  };

  /* Small enough for the small-data area, with small data enabled?  */
  needs_sdata = (size > 0
		 && size <= g_switch_value
		 && rs6000_sdata != SDATA_NONE
		 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)));

  /* Decide whether DECL may live in a read-only section.  */
  if (TREE_CODE (decl) == STRING_CST)
    readonly = ! flag_writable_strings;
  else if (TREE_CODE (decl) == VAR_DECL)
    readonly = (! (flag_pic && reloc)
		&& TREE_READONLY (decl)
		&& ! TREE_SIDE_EFFECTS (decl)
		&& DECL_INITIAL (decl)
		&& DECL_INITIAL (decl) != error_mark_node
		&& TREE_CONSTANT (DECL_INITIAL (decl)));
  else if (TREE_CODE (decl) == CONSTRUCTOR)
    readonly = (! (flag_pic && reloc)
		&& ! TREE_SIDE_EFFECTS (decl)
		&& TREE_CONSTANT (decl));
  else
    readonly = 1;
  /* Only EABI provides a read-only small-data section (.sdata2).  */
  if (needs_sdata && rs6000_sdata != SDATA_EABI)
    readonly = 0;

  (*sec_funcs[(readonly ? 0 : 2) + (needs_sdata ? 1 : 0)])();
}
+
/* A C statement to build up a unique section name, expressed as a
   STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
   RELOC indicates whether the initial value of EXP requires
   link-time relocations.  If you do not define this macro, GCC will use
   the symbol name prefixed by `.' as the section name.  Note - this
   macro can now be called for uninitialized data items as well as
   initialised data and functions.  */

void
rs6000_unique_section (decl, reloc)
     tree decl;
     int reloc;
{
  int len;
  int sec;
  const char *name;
  char *string;
  const char *prefix;

  /* Section-name prefixes, indexed by section kind (rodata, sdata2,
     data, sdata, bss, sbss, text) and by DECL_ONE_ONLY.  */
  static const char *const prefixes[7][2] =
  {
    { ".rodata.", ".gnu.linkonce.r." },
    { ".sdata2.", ".gnu.linkonce.s2." },
    { ".data.", ".gnu.linkonce.d." },
    { ".sdata.", ".gnu.linkonce.s." },
    { ".bss.", ".gnu.linkonce.b." },
    { ".sbss.", ".gnu.linkonce.sb." },
    { ".text.", ".gnu.linkonce.t." }
  };

  if (TREE_CODE (decl) == FUNCTION_DECL)
    sec = 6;
  else
    {
      int readonly;
      int needs_sdata;
      int size;

      /* Decide whether DECL is read-only.  NOTE(review): for a
	 VAR_DECL this evaluates TREE_CONSTANT (DECL_INITIAL (decl))
	 with no NULL check on DECL_INITIAL (unlike
	 rs6000_select_section); confirm TREE_READONLY can never be
	 set on a decl with a null initializer here.  */
      readonly = 1;
      if (TREE_CODE (decl) == STRING_CST)
	readonly = ! flag_writable_strings;
      else if (TREE_CODE (decl) == VAR_DECL)
	readonly = (! (flag_pic && reloc)
		    && TREE_READONLY (decl)
		    && ! TREE_SIDE_EFFECTS (decl)
		    && TREE_CONSTANT (DECL_INITIAL (decl)));

      size = int_size_in_bytes (TREE_TYPE (decl));
      needs_sdata = (size > 0
		     && size <= g_switch_value
		     && rs6000_sdata != SDATA_NONE
		     && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)));

      /* Base section: bss for uninitialized, else data or rodata.  */
      if (DECL_INITIAL (decl) == 0
	  || DECL_INITIAL (decl) == error_mark_node)
	sec = 4;
      else if (! readonly)
	sec = 2;
      else
	sec = 0;

      if (needs_sdata)
	{
	  /* .sdata2 is only for EABI.  */
	  if (sec == 0 && rs6000_sdata != SDATA_EABI)
	    sec = 2;
	  sec += 1;
	}
    }

  STRIP_NAME_ENCODING (name, IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl)));
  prefix = prefixes[sec][DECL_ONE_ONLY (decl)];
  len = strlen (name) + strlen (prefix);
  string = alloca (len + 1);

  sprintf (string, "%s%s", prefix, name);

  DECL_SECTION_NAME (decl) = build_string (len, string);
}
+
+
/* If we are referencing a function that is static or is known to be
   in this file, make the SYMBOL_REF special.  We can use this to indicate
   that we can branch to this function without emitting a no-op after the
   call.  For real AIX calling sequences, we also replace the
   function name with the real name (1 or 2 leading .'s), rather than
   the function descriptor name.  This saves a lot of overriding code
   to read the prefixes.  */

void
rs6000_encode_section_info (decl)
     tree decl;
{
  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      rtx sym_ref = XEXP (DECL_RTL (decl), 0);
      /* Known-local, non-weak function: mark as directly branchable.  */
      if ((TREE_ASM_WRITTEN (decl) || ! TREE_PUBLIC (decl))
	  && ! DECL_WEAK (decl))
	SYMBOL_REF_FLAG (sym_ref) = 1;

      if (DEFAULT_ABI == ABI_AIX)
	{
	  /* Prefix the name with LEN1 dots.  NOTE(review): under this
	     guard LEN1 is always 1, so the '.' stored in str[1] is
	     always overwritten by the memcpy below.  */
	  size_t len1 = (DEFAULT_ABI == ABI_AIX) ? 1 : 2;
	  size_t len2 = strlen (XSTR (sym_ref, 0));
	  char *str = alloca (len1 + len2 + 1);
	  str[0] = '.';
	  str[1] = '.';
	  memcpy (str + len1, XSTR (sym_ref, 0), len2 + 1);

	  XSTR (sym_ref, 0) = ggc_alloc_string (str, len1 + len2);
	}
    }
  else if (rs6000_sdata != SDATA_NONE
	   && DEFAULT_ABI == ABI_V4
	   && TREE_CODE (decl) == VAR_DECL)
    {
      int size = int_size_in_bytes (TREE_TYPE (decl));
      tree section_name = DECL_SECTION_NAME (decl);
      const char *name = (char *)0;
      int len = 0;

      if (section_name)
	{
	  if (TREE_CODE (section_name) == STRING_CST)
	    {
	      name = TREE_STRING_POINTER (section_name);
	      len = TREE_STRING_LENGTH (section_name);
	    }
	  else
	    abort ();
	}

      /* Small-data variable, either by size or by an explicit
	 small-data section name: prefix its symbol with '@' so the
	 output machinery can recognize it.  */
      if ((size > 0 && size <= g_switch_value)
	  || (name
	      && ((len == sizeof (".sdata") - 1
		   && strcmp (name, ".sdata") == 0)
		  || (len == sizeof (".sdata2") - 1
		      && strcmp (name, ".sdata2") == 0)
		  || (len == sizeof (".sbss") - 1
		      && strcmp (name, ".sbss") == 0)
		  || (len == sizeof (".sbss2") - 1
		      && strcmp (name, ".sbss2") == 0)
		  || (len == sizeof (".PPC.EMB.sdata0") - 1
		      && strcmp (name, ".PPC.EMB.sdata0") == 0)
		  || (len == sizeof (".PPC.EMB.sbss0") - 1
		      && strcmp (name, ".PPC.EMB.sbss0") == 0))))
	{
	  rtx sym_ref = XEXP (DECL_RTL (decl), 0);
	  size_t len = strlen (XSTR (sym_ref, 0));
	  char *str = alloca (len + 2);

	  str[0] = '@';
	  memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
	  XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
	}
    }
}
+
+#endif /* USING_ELFOS_H */
+
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG.
+
+ r0 is special and we must not select it as an address
+ register by this routine since our caller will try to
+ increment the returned register via an "la" instruction. */
+
+struct rtx_def *
+find_addr_reg (addr)
+ rtx addr;
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) != 0)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG
+ && REGNO (XEXP (addr, 1)) != 0)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ abort ();
+ }
+ if (GET_CODE (addr) == REG && REGNO (addr) != 0)
+ return addr;
+ abort ();
+}
+
/* Report a fatal "bad address" internal compiler error for insn OP.  */

void
rs6000_fatal_bad_address (op)
     rtx op;
{
  fatal_insn ("bad address", op);
}
+
/* Called to register all of our global variables with the garbage
   collector.  */

static void
rs6000_add_gc_roots ()
{
  ggc_add_rtx_root (&rs6000_compare_op0, 1);
  ggc_add_rtx_root (&rs6000_compare_op1, 1);

  /* The TOC hash table is marked through its custom traversal
     callback, toc_hash_mark_table.  */
  toc_hash_table = htab_create (1021, toc_hash_function, toc_hash_eq, NULL);
  ggc_add_root (&toc_hash_table, 1, sizeof (toc_hash_table),
		toc_hash_mark_table);

#if TARGET_MACHO
  machopic_add_gc_roots ();
#endif
}
+
+#if TARGET_MACHO
+
+#if 0
/* Returns 1 if OP is either a symbol reference or a sum of a symbol
   reference and a constant.  (Currently compiled out by the enclosing
   #if 0.)  */

int
symbolic_operand (op)
     rtx op;
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      /* NOTE(review): the mix of `||' and `&&' below is
	 unparenthesized; it parses as
	 SYMBOL_REF || ((SYMBOL_REF || LABEL_REF) && CONST_INT),
	 which looks intended but should be parenthesized before this
	 code is ever re-enabled.  */
      return (GET_CODE (op) == SYMBOL_REF ||
	      (GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (op, 1)) == CONST_INT);
    default:
      return 0;
    }
}
+#endif
+
+#ifdef RS6000_LONG_BRANCH
+
+static tree stub_list = 0;
+
/* ADD_COMPILER_STUB adds the compiler generated stub for handling
   procedure calls to the linked list.  LABEL_NAME is the stub's own
   label identifier, FUNCTION_NAME the call target, and LINE_NUMBER
   the source line of the call (stored as an INTEGER_CST in the
   node's TREE_TYPE).  */

void
add_compiler_stub (label_name, function_name, line_number)
     tree label_name;
     tree function_name;
     int line_number;
{
  tree stub = build_tree_list (function_name, label_name);
  TREE_TYPE (stub) = build_int_2 (line_number, 0);
  /* Push onto the head of the global stub list.  */
  TREE_CHAIN (stub) = stub_list;
  stub_list = stub;
}
+
/* Accessors for the TREE_LIST stub nodes built by add_compiler_stub:
   value = stub label, purpose = function name, type = line number.  */
#define STUB_LABEL_NAME(STUB)     TREE_VALUE (STUB)
#define STUB_FUNCTION_NAME(STUB)  TREE_PURPOSE (STUB)
#define STUB_LINE_NUMBER(STUB)    TREE_INT_CST_LOW (TREE_TYPE (STUB))
+
/* OUTPUT_COMPILER_STUB outputs the compiler generated stub for
   handling procedure calls from the linked list and initializes the
   linked list.  Stubs are emitted only for non-PIC code; each one
   loads the 32-bit target address into r12 and branches through
   CTR.  */

void
output_compiler_stub ()
{
  char tmp_buf[256];
  char label_buf[256];
  char *label;
  tree tmp_stub, stub;

  /* NOTE(review): `label' and `tmp_stub' are declared but never used
     in this function.  */
  if (!flag_pic)
    for (stub = stub_list; stub; stub = TREE_CHAIN (stub))
      {
	fprintf (asm_out_file,
		 "%s:\n", IDENTIFIER_POINTER(STUB_LABEL_NAME(stub)));

#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
	if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	  fprintf (asm_out_file, "\t.stabd 68,0,%d\n", STUB_LINE_NUMBER(stub));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */

	/* A leading '*' means the name is already assembler-ready;
	   otherwise prepend the user-label underscore.  */
	if (IDENTIFIER_POINTER (STUB_FUNCTION_NAME (stub))[0] == '*')
	  strcpy (label_buf,
		  IDENTIFIER_POINTER (STUB_FUNCTION_NAME (stub))+1);
	else
	  {
	    label_buf[0] = '_';
	    strcpy (label_buf+1,
		    IDENTIFIER_POINTER (STUB_FUNCTION_NAME (stub)));
	  }

	/* lis/ori the absolute target address into r12, then bctr.  */
	strcpy (tmp_buf, "lis r12,hi16(");
	strcat (tmp_buf, label_buf);
	strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	strcat (tmp_buf, label_buf);
	strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	output_asm_insn (tmp_buf, 0);

#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
	if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	  fprintf(asm_out_file, "\t.stabd 68,0,%d\n", STUB_LINE_NUMBER (stub));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      }

  stub_list = 0;
}
+
+/* NO_PREVIOUS_DEF checks in the link list whether the function name is
+ already there or not. */
+
+int
+no_previous_def (function_name)
+     tree function_name;
+{
+  tree stub;
+  for (stub = stub_list; stub; stub = TREE_CHAIN (stub))
+    /* Identifier nodes are shared, so pointer equality compares names.  */
+    if (function_name == STUB_FUNCTION_NAME (stub))
+      return 0;
+  return 1;
+}
+
+/* GET_PREV_LABEL gets the label name from the previous definition of
+ the function. */
+
+tree
+get_prev_label (function_name)
+     tree function_name;
+{
+  tree stub;
+  for (stub = stub_list; stub; stub = TREE_CHAIN (stub))
+    if (function_name == STUB_FUNCTION_NAME (stub))
+      return STUB_LABEL_NAME (stub);
+  /* No stub has been recorded for FUNCTION_NAME.  */
+  return 0;
+}
+
+/* INSN is either a function call or a millicode call. It may have an
+ unconditional jump in its delay slot.
+
+ CALL_DEST is the routine we are calling. */
+
+char *
+output_call (insn, call_dest, operand_number)
+     rtx insn;
+     rtx call_dest;
+     int operand_number;
+{
+  /* Returned to the caller; overwritten on every invocation.  */
+  static char buf[256];
+  if (GET_CODE (call_dest) == SYMBOL_REF && TARGET_LONG_BRANCH && !flag_pic)
+    {
+      tree labelname;
+      tree funname = get_identifier (XSTR (call_dest, 0));
+
+      if (no_previous_def (funname))
+	{
+	  /* Default to line 0: the backward scan below may find no
+	     NOTE insn, and LINE_NUMBER was previously passed to
+	     add_compiler_stub uninitialized in that case.  */
+	  int line_number = 0;
+	  rtx label_rtx = gen_label_rtx ();
+	  char *label_buf, temp_buf[256];
+	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
+				       CODE_LABEL_NUMBER (label_rtx));
+	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
+	  labelname = get_identifier (label_buf);
+	  /* Walk back to the nearest NOTE to find a source line for
+	     the stub's debug stabs.  */
+	  for (; insn && GET_CODE (insn) != NOTE; insn = PREV_INSN (insn));
+	  if (insn)
+	    line_number = NOTE_LINE_NUMBER (insn);
+	  add_compiler_stub (labelname, funname, line_number);
+	}
+      else
+	labelname = get_prev_label (funname);
+
+      /* Branch to the stub; "%.246s" keeps the label short enough
+	 that the whole instruction fits in BUF.  */
+      sprintf (buf, "jbsr %%z%d,%.246s",
+	       operand_number, IDENTIFIER_POINTER (labelname));
+      return buf;
+    }
+  else
+    {
+      sprintf (buf, "bl %%z%d", operand_number);
+      return buf;
+    }
+}
+
+#endif /* RS6000_LONG_BRANCH */
+
+/* Build in BUF the Darwin-local label "L<N>$<SYMBOL>", carrying over
+   (or adding) double quotes when SYMBOL is already quoted or contains
+   characters that need quoting.  LENGTH is accepted for symmetry with
+   the sibling GEN_* macros but is not used here.  */
+#define GEN_LOCAL_LABEL_FOR_SYMBOL(BUF,SYMBOL,LENGTH,N)		\
+  do {								\
+    const char *const symbol_ = (SYMBOL);			\
+    char *buffer_ = (BUF);					\
+    if (symbol_[0] == '"')					\
+      {								\
+	/* SYMBOL supplies its own closing quote.  */		\
+	sprintf(buffer_, "\"L%d$%s", (N), symbol_+1);		\
+      }								\
+    else if (name_needs_quotes(symbol_))			\
+      {								\
+	sprintf(buffer_, "\"L%d$%s\"", (N), symbol_);		\
+      }								\
+    else							\
+      {								\
+	sprintf(buffer_, "L%d$%s", (N), symbol_);		\
+      }								\
+  } while (0)
+
+
+/* Generate PIC and indirect symbol stubs. */
+
+void
+machopic_output_stub (file, symb, stub)
+     FILE *file;
+     const char *symb, *stub;
+{
+  unsigned int length;
+  char *symbol_name, *lazy_ptr_name;
+  char *local_label_0;
+  static int label = 0;		/* Uniquifies the local label per stub.  */
+
+  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
+  STRIP_NAME_ENCODING (symb, symb);
+
+  label += 1;
+
+  /* Buffers sized for the symbol plus the label decoration.  */
+  length = strlen (symb);
+  symbol_name = alloca (length + 32);
+  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
+
+  lazy_ptr_name = alloca (length + 32);
+  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
+
+  local_label_0 = alloca (length + 32);
+  GEN_LOCAL_LABEL_FOR_SYMBOL (local_label_0, symb, length, 0);
+
+  if (flag_pic == 2)
+    machopic_picsymbol_stub_section ();
+  else
+    machopic_symbol_stub_section ();
+
+  fprintf (file, "%s:\n", stub);
+  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+
+  if (flag_pic == 2)
+    {
+      /* PIC stub: establish a base address in r11 via bcl/mflr,
+	 compute the lazy pointer's address relative to it, load the
+	 bound target into r12 and branch through CTR.  */
+      fprintf (file, "\tmflr r0\n");
+      fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
+      fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
+      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
+	       lazy_ptr_name, local_label_0);
+      fprintf (file, "\tmtlr r0\n");
+      fprintf (file, "\tlwz r12,lo16(%s-%s)(r11)\n",
+	       lazy_ptr_name, local_label_0);
+      fprintf (file, "\tmtctr r12\n");
+      fprintf (file, "\taddi r11,r11,lo16(%s-%s)\n",
+	       lazy_ptr_name, local_label_0);
+      fprintf (file, "\tbctr\n");
+    }
+  else
+    /* Non-PIC ("non-pure") stubs are not implemented for this target.  */
+    fprintf (file, "non-pure not supported\n");
+
+  /* Emit the lazy pointer, initially bound to the dyld resolver.  */
+  machopic_lazy_symbol_ptr_section ();
+  fprintf (file, "%s:\n", lazy_ptr_name);
+  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
+  fprintf (file, "\t.long dyld_stub_binding_helper\n");
+}
+
+/* Legitimize PIC addresses. If the address is already
+ position-independent, we return ORIG. Newly generated
+ position-independent addresses go into a reg. This is REG if non
+ zero, otherwise we allocate register(s) as necessary. */
+
+/* True iff X fits in a signed 16-bit immediate field.  */
+#define SMALL_INT(X) ((unsigned) (INTVAL(X) + 0x8000) < 0x10000)
+
+rtx
+rs6000_machopic_legitimize_pic_address (orig, mode, reg)
+     rtx orig;
+     enum machine_mode mode;
+     rtx reg;
+{
+  rtx base, offset;
+
+  /* Allocate a scratch register unless we are reloading, when no new
+     pseudos may be created.  */
+  if (reg == NULL && ! reload_in_progress && ! reload_completed)
+    reg = gen_reg_rtx (Pmode);
+
+  if (GET_CODE (orig) == CONST)
+    {
+      /* Already expressed relative to the PIC base; nothing to do.  */
+      if (GET_CODE (XEXP (orig, 0)) == PLUS
+	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+	return orig;
+
+      /* Legitimize the two halves of a (const (plus ...)) separately.  */
+      if (GET_CODE (XEXP (orig, 0)) == PLUS)
+	{
+	  base =
+	    rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
+						    Pmode, reg);
+	  offset =
+	    rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
+						    Pmode, reg);
+	}
+      else
+	abort ();
+
+      if (GET_CODE (offset) == CONST_INT)
+	{
+	  if (SMALL_INT (offset))
+	    return plus_constant (base, INTVAL (offset));
+	  else if (! reload_in_progress && ! reload_completed)
+	    offset = force_reg (Pmode, offset);
+	  else
+	    {
+	      /* During reload, fall back to a constant-pool load.  */
+	      rtx mem = force_const_mem (Pmode, orig);
+	      return machopic_legitimize_pic_address (mem, Pmode, reg);
+	    }
+	}
+      return gen_rtx (PLUS, Pmode, base, offset);
+    }
+
+  /* Fall back on generic machopic code.  */
+  return machopic_legitimize_pic_address (orig, mode, reg);
+}
+
+/* This is just a placeholder to make linking work without having to
+ add this to the generic Darwin EXTRA_SECTIONS. If -mcall-aix is
+ ever needed for Darwin (not too likely!) this would have to get a
+ real definition. */
+
+void
+toc_section ()
+{
+  /* Deliberately empty -- see the placeholder comment above.  */
+}
+
+#endif /* TARGET_MACHO */
+
+#if TARGET_ELF
+/* Return the SECTION_* flags for a named ELF section.  On top of the
+   default flags, -mrelocatable forces SECTION_WRITE -- cf. the @fixup
+   entries emitted by the out_constructor/out_destructor hooks below,
+   which imply the contents may be patched at load time.  */
+static unsigned int
+rs6000_elf_section_type_flags (decl, name, reloc)
+     tree decl;
+     const char *name;
+     int reloc;
+{
+  unsigned int flags = default_section_type_flags (decl, name, reloc);
+
+  if (TARGET_RELOCATABLE)
+    flags |= SECTION_WRITE;
+
+  return flags;
+}
+
+/* Record an element in the table of global constructors. SYMBOL is
+ a SYMBOL_REF of the function to be called; PRIORITY is a number
+ between 0 and MAX_INIT_PRIORITY.
+
+ This differs from default_named_section_asm_out_constructor in
+ that we have special handling for -mrelocatable. */
+
+static void
+rs6000_elf_asm_out_constructor (symbol, priority)
+     rtx symbol;
+     int priority;
+{
+  const char *section = ".ctors";
+  char buf[16];
+
+  /* Non-default priorities go into a numbered ".ctors.NNNNN" section.  */
+  if (priority != DEFAULT_INIT_PRIORITY)
+    {
+      sprintf (buf, ".ctors.%.5u",
+	       /* Invert the numbering so the linker puts us in the proper
+		  order; constructors are run from right to left, and the
+		  linker sorts in increasing order.  */
+	       MAX_INIT_PRIORITY - priority);
+      section = buf;
+    }
+
+  named_section_flags (section, SECTION_WRITE);
+  assemble_align (POINTER_SIZE);
+
+  if (TARGET_RELOCATABLE)
+    {
+      /* -mrelocatable: emit an @fixup entry so the address can be
+	 adjusted at load time.  */
+      fputs ("\t.long (", asm_out_file);
+      output_addr_const (asm_out_file, symbol);
+      fputs (")@fixup\n", asm_out_file);
+    }
+  else
+    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+
+/* Like rs6000_elf_asm_out_constructor, but records SYMBOL in the
+   ".dtors" (or priority-specific) section for global destruction.  */
+static void
+rs6000_elf_asm_out_destructor (symbol, priority)
+     rtx symbol;
+     int priority;
+{
+  const char *section = ".dtors";
+  char buf[16];
+
+  if (priority != DEFAULT_INIT_PRIORITY)
+    {
+      sprintf (buf, ".dtors.%.5u",
+	       /* Invert the numbering so the linker puts us in the proper
+		  order; constructors are run from right to left, and the
+		  linker sorts in increasing order.  */
+	       MAX_INIT_PRIORITY - priority);
+      section = buf;
+    }
+
+  named_section_flags (section, SECTION_WRITE);
+  assemble_align (POINTER_SIZE);
+
+  if (TARGET_RELOCATABLE)
+    {
+      /* -mrelocatable: emit an @fixup entry so the address can be
+	 adjusted at load time.  */
+      fputs ("\t.long (", asm_out_file);
+      output_addr_const (asm_out_file, symbol);
+      fputs (")@fixup\n", asm_out_file);
+    }
+  else
+    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
+}
+#endif
+
+#ifdef OBJECT_FORMAT_COFF
+/* Switch the assembler output to the named .csect; FLAGS carries no
+   additional information for XCOFF here.  */
+static void
+xcoff_asm_named_section (name, flags)
+     const char *name;
+     unsigned int flags ATTRIBUTE_UNUSED;
+{
+  fprintf (asm_out_file, "\t.csect %s\n", name);
+}
+#endif
diff --git a/contrib/gcc/config/rs6000/rs6000.h b/contrib/gcc/config/rs6000/rs6000.h
new file mode 100644
index 0000000..2deaf75
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rs6000.h
@@ -0,0 +1,2974 @@
+/* Definitions of target machine for GNU compiler, for IBM RS/6000.
+ Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+ 2000, 2001, 2002 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Note that some other tm.h files include this one and then override
+ many of the definitions. */
+
+/* Definitions for the object file format. These are set at
+ compile-time. */
+
+#define OBJECT_XCOFF 1
+#define OBJECT_ELF 2
+#define OBJECT_PEF 3
+#define OBJECT_MACHO 4
+
+#define TARGET_ELF (TARGET_OBJECT_FORMAT == OBJECT_ELF)
+#define TARGET_XCOFF (TARGET_OBJECT_FORMAT == OBJECT_XCOFF)
+#define TARGET_MACOS (TARGET_OBJECT_FORMAT == OBJECT_PEF)
+#define TARGET_MACHO (TARGET_OBJECT_FORMAT == OBJECT_MACHO)
+
+#ifndef TARGET_AIX
+#define TARGET_AIX 0
+#endif
+
+/* Default string to use for cpu if not specified. */
+#ifndef TARGET_CPU_DEFAULT
+#define TARGET_CPU_DEFAULT ((char *)0)
+#endif
+
+/* Common CPP definitions used by CPP_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#define CPP_CPU_SPEC \
+"%{!mcpu*: \
+ %{mpower: %{!mpower2: -D_ARCH_PWR}} \
+ %{mpower2: -D_ARCH_PWR2} \
+ %{mpowerpc*: -D_ARCH_PPC} \
+ %{mno-power: %{!mpowerpc*: -D_ARCH_COM}} \
+ %{!mno-power: %{!mpower2: %(cpp_default)}}} \
+%{mcpu=common: -D_ARCH_COM} \
+%{mcpu=power: -D_ARCH_PWR} \
+%{mcpu=power2: -D_ARCH_PWR2} \
+%{mcpu=powerpc: -D_ARCH_PPC} \
+%{mcpu=rios: -D_ARCH_PWR} \
+%{mcpu=rios1: -D_ARCH_PWR} \
+%{mcpu=rios2: -D_ARCH_PWR2} \
+%{mcpu=rsc: -D_ARCH_PWR} \
+%{mcpu=rsc1: -D_ARCH_PWR} \
+%{mcpu=401: -D_ARCH_PPC} \
+%{mcpu=403: -D_ARCH_PPC} \
+%{mcpu=405: -D_ARCH_PPC} \
+%{mcpu=505: -D_ARCH_PPC} \
+%{mcpu=601: -D_ARCH_PPC -D_ARCH_PWR} \
+%{mcpu=602: -D_ARCH_PPC} \
+%{mcpu=603: -D_ARCH_PPC} \
+%{mcpu=603e: -D_ARCH_PPC} \
+%{mcpu=ec603e: -D_ARCH_PPC} \
+%{mcpu=604: -D_ARCH_PPC} \
+%{mcpu=604e: -D_ARCH_PPC} \
+%{mcpu=620: -D_ARCH_PPC} \
+%{mcpu=740: -D_ARCH_PPC} \
+%{mcpu=7400: -D_ARCH_PPC} \
+%{mcpu=7450: -D_ARCH_PPC} \
+%{mcpu=750: -D_ARCH_PPC} \
+%{mcpu=801: -D_ARCH_PPC} \
+%{mcpu=821: -D_ARCH_PPC} \
+%{mcpu=823: -D_ARCH_PPC} \
+%{mcpu=860: -D_ARCH_PPC} \
+%{maltivec: -D__ALTIVEC__}"
+
+/* Common ASM definitions used by ASM_SPEC among the various targets
+ for handling -mcpu=xxx switches. */
+#define ASM_CPU_SPEC \
+"%{!mcpu*: \
+ %{mpower: %{!mpower2: -mpwr}} \
+ %{mpower2: -mpwrx} \
+ %{mpowerpc*: -mppc} \
+ %{mno-power: %{!mpowerpc*: -mcom}} \
+ %{!mno-power: %{!mpower2: %(asm_default)}}} \
+%{mcpu=common: -mcom} \
+%{mcpu=power: -mpwr} \
+%{mcpu=power2: -mpwrx} \
+%{mcpu=powerpc: -mppc} \
+%{mcpu=rios: -mpwr} \
+%{mcpu=rios1: -mpwr} \
+%{mcpu=rios2: -mpwrx} \
+%{mcpu=rsc: -mpwr} \
+%{mcpu=rsc1: -mpwr} \
+%{mcpu=401: -mppc} \
+%{mcpu=403: -mppc} \
+%{mcpu=405: -mppc} \
+%{mcpu=505: -mppc} \
+%{mcpu=601: -m601} \
+%{mcpu=602: -mppc} \
+%{mcpu=603: -mppc} \
+%{mcpu=603e: -mppc} \
+%{mcpu=ec603e: -mppc} \
+%{mcpu=604: -mppc} \
+%{mcpu=604e: -mppc} \
+%{mcpu=620: -mppc} \
+%{mcpu=740: -mppc} \
+%{mcpu=7400: -mppc} \
+%{mcpu=7450: -mppc} \
+%{mcpu=750: -mppc} \
+%{mcpu=801: -mppc} \
+%{mcpu=821: -mppc} \
+%{mcpu=823: -mppc} \
+%{mcpu=860: -mppc} \
+%{maltivec: -maltivec}"
+
+#define CPP_DEFAULT_SPEC ""
+
+#define ASM_DEFAULT_SPEC ""
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ specification name, and a string constant that used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define SUBTARGET_EXTRA_SPECS
+
+#define EXTRA_SPECS \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cpp_default", CPP_DEFAULT_SPEC }, \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "asm_default", ASM_DEFAULT_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+/* Architecture type. */
+
+extern int target_flags;
+
+/* Use POWER architecture instructions and MQ register. */
+#define MASK_POWER 0x00000001
+
+/* Use POWER2 extensions to POWER architecture. */
+#define MASK_POWER2 0x00000002
+
+/* Use PowerPC architecture instructions. */
+#define MASK_POWERPC 0x00000004
+
+/* Use PowerPC General Purpose group optional instructions, e.g. fsqrt. */
+#define MASK_PPC_GPOPT 0x00000008
+
+/* Use PowerPC Graphics group optional instructions, e.g. fsel. */
+#define MASK_PPC_GFXOPT 0x00000010
+
+/* Use PowerPC-64 architecture instructions. */
+#define MASK_POWERPC64 0x00000020
+
+/* Use revised mnemonic names defined for PowerPC architecture. */
+#define MASK_NEW_MNEMONICS 0x00000040
+
+/* Disable placing fp constants in the TOC; can be turned on when the
+ TOC overflows. */
+#define MASK_NO_FP_IN_TOC 0x00000080
+
+/* Disable placing symbol+offset constants in the TOC; can be turned on when
+ the TOC overflows. */
+#define MASK_NO_SUM_IN_TOC 0x00000100
+
+/* Output only one TOC entry per module. Normally linking fails if
+ there are more than 16K unique variables/constants in an executable. With
+ this option, linking fails only if there are more than 16K modules, or
+ if there are more than 16K unique variables/constant in a single module.
+
+ This is at the cost of having 2 extra loads and one extra store per
+ function, and one less allocable register. */
+#define MASK_MINIMAL_TOC 0x00000200
+
+/* Nonzero for the 64bit model: ints, longs, and pointers are 64 bits. */
+#define MASK_64BIT 0x00000400
+
+/* Disable use of FPRs. */
+#define MASK_SOFT_FLOAT 0x00000800
+
+/* Enable load/store multiple, even on powerpc */
+#define MASK_MULTIPLE 0x00001000
+#define MASK_MULTIPLE_SET 0x00002000
+
+/* Use string instructions for block moves */
+#define MASK_STRING 0x00004000
+#define MASK_STRING_SET 0x00008000
+
+/* Disable update form of load/store */
+#define MASK_NO_UPDATE 0x00010000
+
+/* Disable fused multiply/add operations */
+#define MASK_NO_FUSED_MADD 0x00020000
+
+/* Nonzero if we need to schedule the prolog and epilog. */
+#define MASK_SCHED_PROLOG 0x00040000
+
+/* Use AltiVec instructions. */
+#define MASK_ALTIVEC 0x00080000
+
+/* Return small structures in memory (as the AIX ABI requires). */
+#define MASK_AIX_STRUCT_RET 0x00100000
+#define MASK_AIX_STRUCT_RET_SET 0x00200000
+
+/* The only remaining free bit is 0x00400000. sysv4.h uses
+ 0x00800000 -> 0x40000000, and 0x80000000 is not available
+ because target_flags is signed. */
+
+#define TARGET_POWER (target_flags & MASK_POWER)
+#define TARGET_POWER2 (target_flags & MASK_POWER2)
+#define TARGET_POWERPC (target_flags & MASK_POWERPC)
+#define TARGET_PPC_GPOPT (target_flags & MASK_PPC_GPOPT)
+#define TARGET_PPC_GFXOPT (target_flags & MASK_PPC_GFXOPT)
+#define TARGET_NEW_MNEMONICS (target_flags & MASK_NEW_MNEMONICS)
+#define TARGET_NO_FP_IN_TOC (target_flags & MASK_NO_FP_IN_TOC)
+#define TARGET_NO_SUM_IN_TOC (target_flags & MASK_NO_SUM_IN_TOC)
+#define TARGET_MINIMAL_TOC (target_flags & MASK_MINIMAL_TOC)
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+#define TARGET_SOFT_FLOAT (target_flags & MASK_SOFT_FLOAT)
+#define TARGET_MULTIPLE (target_flags & MASK_MULTIPLE)
+#define TARGET_MULTIPLE_SET (target_flags & MASK_MULTIPLE_SET)
+#define TARGET_STRING (target_flags & MASK_STRING)
+#define TARGET_STRING_SET (target_flags & MASK_STRING_SET)
+#define TARGET_NO_UPDATE (target_flags & MASK_NO_UPDATE)
+#define TARGET_NO_FUSED_MADD (target_flags & MASK_NO_FUSED_MADD)
+#define TARGET_SCHED_PROLOG (target_flags & MASK_SCHED_PROLOG)
+#define TARGET_ALTIVEC (target_flags & MASK_ALTIVEC)
+#define TARGET_AIX_STRUCT_RET (target_flags & MASK_AIX_STRUCT_RET)
+
+#define TARGET_32BIT (! TARGET_64BIT)
+#define TARGET_HARD_FLOAT (! TARGET_SOFT_FLOAT)
+#define TARGET_UPDATE (! TARGET_NO_UPDATE)
+#define TARGET_FUSED_MADD (! TARGET_NO_FUSED_MADD)
+
+#ifdef IN_LIBGCC2
+/* For libgcc2 we make sure this is a compile time constant */
+#if defined (__64BIT__) || defined (__powerpc64__)
+#define TARGET_POWERPC64 1
+#else
+#define TARGET_POWERPC64 0
+#endif
+#else
+#define TARGET_POWERPC64 (target_flags & MASK_POWERPC64)
+#endif
+
+#define TARGET_XL_CALL 0
+
+/* Run-time compilation parameters selecting different hardware subsets.
+
+ Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+/* Every user-visible description is wrapped in N_() so gettext can
+   collect it for translation; options listed twice pair a
+   flag-setting entry with one clearing related flags.  */
+#define TARGET_SWITCHES							\
+ {{"power",		MASK_POWER | MASK_MULTIPLE | MASK_STRING,	\
+			N_("Use POWER instruction set")},		\
+  {"power2",		(MASK_POWER | MASK_MULTIPLE | MASK_STRING	\
+			 | MASK_POWER2),				\
+			N_("Use POWER2 instruction set")},		\
+  {"no-power2",		- MASK_POWER2,					\
+			N_("Do not use POWER2 instruction set")},	\
+  {"no-power",		- (MASK_POWER | MASK_POWER2 | MASK_MULTIPLE	\
+			   | MASK_STRING),				\
+			N_("Do not use POWER instruction set")},	\
+  {"powerpc",		MASK_POWERPC,					\
+			N_("Use PowerPC instruction set")},		\
+  {"no-powerpc",	- (MASK_POWERPC | MASK_PPC_GPOPT		\
+			   | MASK_PPC_GFXOPT | MASK_POWERPC64),		\
+			N_("Do not use PowerPC instruction set")},	\
+  {"powerpc-gpopt",	MASK_POWERPC | MASK_PPC_GPOPT,			\
+			N_("Use PowerPC General Purpose group optional instructions")},\
+  {"no-powerpc-gpopt",	- MASK_PPC_GPOPT,				\
+			N_("Don't use PowerPC General Purpose group optional instructions")},\
+  {"powerpc-gfxopt",	MASK_POWERPC | MASK_PPC_GFXOPT,			\
+			N_("Use PowerPC Graphics group optional instructions")},\
+  {"no-powerpc-gfxopt",	- MASK_PPC_GFXOPT,				\
+			N_("Don't use PowerPC Graphics group optional instructions")},\
+  {"powerpc64",		MASK_POWERPC64,					\
+			N_("Use PowerPC-64 instruction set")},		\
+  {"no-powerpc64",	- MASK_POWERPC64,				\
+			N_("Don't use PowerPC-64 instruction set")},	\
+  {"altivec",		MASK_ALTIVEC ,					\
+			N_("Use AltiVec instructions")},		\
+  {"no-altivec",	- MASK_ALTIVEC ,				\
+			N_("Don't use AltiVec instructions")},		\
+  {"new-mnemonics",	MASK_NEW_MNEMONICS,				\
+			N_("Use new mnemonics for PowerPC architecture")},\
+  {"old-mnemonics",	-MASK_NEW_MNEMONICS,				\
+			N_("Use old mnemonics for PowerPC architecture")},\
+  {"full-toc",		- (MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC	\
+			   | MASK_MINIMAL_TOC),				\
+			N_("Put everything in the regular TOC")},	\
+  {"fp-in-toc",		- MASK_NO_FP_IN_TOC,				\
+			N_("Place floating point constants in TOC")},	\
+  {"no-fp-in-toc",	MASK_NO_FP_IN_TOC,				\
+			N_("Don't place floating point constants in TOC")},\
+  {"sum-in-toc",	- MASK_NO_SUM_IN_TOC,				\
+			N_("Place symbol+offset constants in TOC")},	\
+  {"no-sum-in-toc",	MASK_NO_SUM_IN_TOC,				\
+			N_("Don't place symbol+offset constants in TOC")},\
+  {"minimal-toc",	MASK_MINIMAL_TOC,				\
+			N_("Use only one TOC entry per procedure")},	\
+  {"minimal-toc",	- (MASK_NO_FP_IN_TOC | MASK_NO_SUM_IN_TOC),	\
+			""},						\
+  {"no-minimal-toc",	- MASK_MINIMAL_TOC,				\
+			N_("Place variable addresses in the regular TOC")},\
+  {"hard-float",	- MASK_SOFT_FLOAT,				\
+			N_("Use hardware fp")},				\
+  {"soft-float",	MASK_SOFT_FLOAT,				\
+			N_("Do not use hardware fp")},			\
+  {"multiple",		MASK_MULTIPLE | MASK_MULTIPLE_SET,		\
+			N_("Generate load/store multiple instructions")},\
+  {"no-multiple",	- MASK_MULTIPLE,				\
+			N_("Do not generate load/store multiple instructions")},\
+  {"no-multiple",	MASK_MULTIPLE_SET,				\
+			""},						\
+  {"string",		MASK_STRING | MASK_STRING_SET,			\
+			N_("Generate string instructions for block moves")},\
+  {"no-string",		- MASK_STRING,					\
+			N_("Do not generate string instructions for block moves")},\
+  {"no-string",		MASK_STRING_SET,				\
+			""},						\
+  {"update",		- MASK_NO_UPDATE,				\
+			N_("Generate load/store with update instructions")},\
+  {"no-update",		MASK_NO_UPDATE,					\
+			N_("Do not generate load/store with update instructions")},\
+  {"fused-madd",	- MASK_NO_FUSED_MADD,				\
+			N_("Generate fused multiply/add instructions")},\
+  {"no-fused-madd",	MASK_NO_FUSED_MADD,				\
+			N_("Don't generate fused multiply/add instructions")},\
+  {"sched-prolog",	MASK_SCHED_PROLOG,				\
+			""},						\
+  {"no-sched-prolog",	-MASK_SCHED_PROLOG,				\
+			N_("Don't schedule the start and end of the procedure")},\
+  {"sched-epilog",	MASK_SCHED_PROLOG,				\
+			""},						\
+  {"no-sched-epilog",	-MASK_SCHED_PROLOG,				\
+			""},						\
+  {"aix-struct-return",	MASK_AIX_STRUCT_RET | MASK_AIX_STRUCT_RET_SET,	\
+			N_("Return all structures in memory (AIX default)")},\
+  {"svr4-struct-return", - MASK_AIX_STRUCT_RET,\
+			N_("Return small structures in registers (SVR4 default)")},\
+  {"svr4-struct-return",MASK_AIX_STRUCT_RET_SET,\
+			""},\
+  {"no-aix-struct-return", - MASK_AIX_STRUCT_RET,\
+			""},\
+  {"no-aix-struct-return", MASK_AIX_STRUCT_RET_SET,\
+			""},\
+  {"no-svr4-struct-return", MASK_AIX_STRUCT_RET | MASK_AIX_STRUCT_RET_SET,\
+			""},\
+  SUBTARGET_SWITCHES							\
+  {"",			TARGET_DEFAULT | MASK_SCHED_PROLOG,		\
+			""}}
+
+#define TARGET_DEFAULT (MASK_POWER | MASK_MULTIPLE | MASK_STRING)
+
+/* This is meant to be redefined in the host dependent files */
+#define SUBTARGET_SWITCHES
+
+/* Processor type. Order must match cpu attribute in MD file. */
+enum processor_type
+ {
+ PROCESSOR_RIOS1,
+ PROCESSOR_RIOS2,
+ PROCESSOR_RS64A,
+ PROCESSOR_MPCCORE,
+ PROCESSOR_PPC403,
+ PROCESSOR_PPC405,
+ PROCESSOR_PPC601,
+ PROCESSOR_PPC603,
+ PROCESSOR_PPC604,
+ PROCESSOR_PPC604e,
+ PROCESSOR_PPC620,
+ PROCESSOR_PPC630,
+ PROCESSOR_PPC750,
+ PROCESSOR_PPC7400,
+ PROCESSOR_PPC7450
+};
+
+extern enum processor_type rs6000_cpu;
+
+/* Recast the processor type to the cpu attribute. */
+#define rs6000_cpu_attr ((enum attr_cpu)rs6000_cpu)
+
+/* Define generic processor types based upon current deployment. */
+#define PROCESSOR_COMMON PROCESSOR_PPC601
+#define PROCESSOR_POWER PROCESSOR_RIOS1
+#define PROCESSOR_POWERPC PROCESSOR_PPC604
+#define PROCESSOR_POWERPC64 PROCESSOR_RS64A
+
+/* Define the default processor. This is overridden by other tm.h files. */
+#define PROCESSOR_DEFAULT PROCESSOR_RIOS1
+#define PROCESSOR_DEFAULT64 PROCESSOR_RS64A
+
+/* Specify the dialect of assembler to use. New mnemonics is dialect one
+ and the old mnemonics are dialect zero. */
+#define ASSEMBLER_DIALECT (TARGET_NEW_MNEMONICS ? 1 : 0)
+
+/* This is meant to be overridden in target specific files. */
+#define SUBTARGET_OPTIONS
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", &rs6000_select[1].string, \
+ N_("Use features of and schedule code for given CPU") }, \
+ {"tune=", &rs6000_select[2].string, \
+ N_("Schedule code for given CPU") }, \
+ {"debug=", &rs6000_debug_name, N_("Enable debug output") }, \
+ {"abi=", &rs6000_abi_string, N_("Specify ABI to use") }, \
+ {"long-double-", &rs6000_long_double_size_string, \
+ N_("Specify size of long double (64 or 128 bits)") }, \
+ SUBTARGET_OPTIONS \
+}
+
+/* rs6000_select[0] is reserved for the default cpu defined via --with-cpu */
+struct rs6000_cpu_select
+{
+ const char *string;
+ const char *name;
+ int set_tune_p;
+ int set_arch_p;
+};
+
+extern struct rs6000_cpu_select rs6000_select[];
+
+/* Debug support */
+extern const char *rs6000_debug_name; /* Name for -mdebug-xxxx option */
+extern const char *rs6000_abi_string; /* for -mabi={sysv,darwin,eabi,aix,altivec} */
+extern int rs6000_debug_stack; /* debug stack applications */
+extern int rs6000_debug_arg; /* debug argument handling */
+
+#define TARGET_DEBUG_STACK rs6000_debug_stack
+#define TARGET_DEBUG_ARG rs6000_debug_arg
+
+/* These are separate from target_flags because we've run out of bits
+ there. */
+extern const char *rs6000_long_double_size_string;
+extern int rs6000_long_double_type_size;
+extern int rs6000_altivec_abi;
+
+#define TARGET_LONG_DOUBLE_128 (rs6000_long_double_type_size == 128)
+#define TARGET_ALTIVEC_ABI rs6000_altivec_abi
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ Don't use this macro to turn on various extra optimizations for
+ `-O'. That is what `OPTIMIZATION_OPTIONS' is for.
+
+ On the RS/6000 this is used to define the target cpu type. */
+
+#define OVERRIDE_OPTIONS rs6000_override_options (TARGET_CPU_DEFAULT)
+
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) optimization_options(LEVEL,SIZE)
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* target machine storage layout */
+
+/* Define to support cross compilation to an RS6000 target. */
+#define REAL_ARITHMETIC
+
+/* Define this macro if it is advisable to hold scalars in registers
+ in a wider mode than that declared by the program. In such cases,
+ the value is constrained to be within the bounds of the declared
+ type, but kept valid in the wider mode. The signedness of the
+ extension may differ from that of the type. */
+
+#define PROMOTE_MODE(MODE,UNSIGNEDP,TYPE) \
+ if (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+ (MODE) = word_mode;
+
+/* Define this if function arguments should also be promoted using the above
+ procedure. */
+
+#define PROMOTE_FUNCTION_ARGS
+
+/* Likewise, if the function return value is promoted. */
+
+#define PROMOTE_FUNCTION_RETURN
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+/* That is true on RS/6000. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+/* That is true on RS/6000. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is lowest
+ numbered.
+
+ For RS/6000 we can decide arbitrarily since there are no machine
+ instructions for them. Might as well be consistent with bits and bytes. */
+#define WORDS_BIG_ENDIAN 1
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD (! TARGET_POWERPC64 ? 32 : 64)
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (! TARGET_POWERPC64 ? 4 : 8)
+#define MIN_UNITS_PER_WORD 4
+#define UNITS_PER_FP_WORD 8
+#define UNITS_PER_ALTIVEC_WORD 16
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+#define PTRDIFF_TYPE "int"
+
+/* Type used for size_t, as a string used in a declaration. */
+#define SIZE_TYPE "long unsigned int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+#define WCHAR_TYPE "short unsigned int"
+
+/* Width of wchar_t in bits. */
+#define WCHAR_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `short' on the
+ target machine. If you don't define this, the default is half a
+ word. (If this would be less than one storage unit, it is
+ rounded up to one unit.) */
+#define SHORT_TYPE_SIZE 16
+
+/* A C expression for the size in bits of the type `int' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define INT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `long' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define LONG_TYPE_SIZE (TARGET_32BIT ? 32 : 64)
+#define MAX_LONG_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `long long' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define LONG_LONG_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `char' on the
+ target machine. If you don't define this, the default is one
+ quarter of a word. (If this would be less than one storage unit,
+ it is rounded up to one unit.) */
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+
+/* A C expression for the size in bits of the type `float' on the
+ target machine. If you don't define this, the default is one
+ word. */
+#define FLOAT_TYPE_SIZE 32
+
+/* A C expression for the size in bits of the type `double' on the
+ target machine. If you don't define this, the default is two
+ words. */
+#define DOUBLE_TYPE_SIZE 64
+
+/* A C expression for the size in bits of the type `long double' on
+ the target machine. If you don't define this, the default is two
+ words. */
+#define LONG_DOUBLE_TYPE_SIZE rs6000_long_double_type_size
+
+/* Constant which presents upper bound of the above value. */
+#define MAX_LONG_DOUBLE_TYPE_SIZE 128
+
+/* Define this to set long double type size to use in libgcc2.c, which can
+ not depend on target_flags. */
+#ifdef __LONG_DOUBLE_128__
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 128
+#else
+#define LIBGCC2_LONG_DOUBLE_TYPE_SIZE 64
+#endif
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE (TARGET_32BIT ? 32 : 64)
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY (TARGET_32BIT ? 32 : 64)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY ((TARGET_32BIT && !TARGET_ALTIVEC_ABI) ? 64 : 128)
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY 32
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 128
+
+/* A C expression to compute the alignment for a variables in the
+ local store. TYPE is the data type, and ALIGN is the alignment
+ that the object would ordinarily have. */
+#define LOCAL_ALIGNMENT(TYPE, ALIGN) \
+ ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) ? 128 : ALIGN)
+
+/* Handle #pragma pack. */
+#define HANDLE_PRAGMA_PACK 1
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons.
+ Align vectors to 128 bits. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == VECTOR_TYPE ? 128 \
+ : TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+
+/* Non-zero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 0
+
+/* Define this macro to be the value 1 if unaligned accesses have a cost
+ many times greater than aligned accesses, for example if they are
+ emulated in a trap handler. */
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) \
+ (STRICT_ALIGNMENT \
+ || (((MODE) == SFmode || (MODE) == DFmode || (MODE) == DImode) \
+ && (ALIGN) < 32))
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ RS/6000 has 32 fixed-point registers, 32 floating-point registers,
+ an MQ register, a count register, a link register, and 8 condition
+ register fields, which we view here as separate registers.
+
+ In addition, the difference between the frame and argument pointers is
+ a function of the number of registers saved, so we need to have a
+ register for AP that will later be eliminated in favor of SP or FP.
+ This is a normal register, but it is fixed.
+
+ We also create a pseudo register for float/int conversions, that will
+ really represent the memory location used. It is represented here as
+ a register, in order to work around problems in allocating stack storage
+ in inline functions.
+
+ Numbering: 0-31 GPRs, 32-63 FPRs, 64 MQ, 65 LR, 66 CTR, 67 AP,
+ 68-75 CR fields, 76 XER, 77-108 AltiVec, 109 VRSAVE. */
+
+#define FIRST_PSEUDO_REGISTER 110
+
+/* This must be included for pre gcc 3.0 glibc compatibility. */
+#define PRE_GCC3_DWARF_FRAME_REGISTERS 77
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ On RS/6000, r1 is used for the stack. On Darwin, r2 is available
+ as a local register; for all other OS's r2 is the TOC pointer.
+
+ cr5 is not supposed to be used.
+
+ On System V implementations, r13 is fixed and not available for use.
+
+ Each array below has FIRST_PSEUDO_REGISTER (110) entries: four rows
+ of 16 GPRs/FPRs each, one row for registers 64-76 (MQ, LR, CTR, AP,
+ the 8 CR fields, XER), two rows of AltiVec registers, then VRSAVE. */
+
+#define FIXED_REGISTERS \
+ {0, 1, FIXED_R2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1 \
+}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1 \
+}
+
+/* Like `CALL_USED_REGISTERS' except this macro doesn't require that
+ the entire set of `FIXED_REGISTERS' be included.
+ (`CALL_USED_REGISTERS' must be a superset of `FIXED_REGISTERS').
+ This macro is optional. If not specified, it defaults to the value
+ of `CALL_USED_REGISTERS'. Here it differs from CALL_USED_REGISTERS
+ only in the final (VRSAVE) entry. */
+
+#define CALL_REALLY_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, FIXED_R13, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, \
+ /* AltiVec registers. */ \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0 \
+}
+
+#define MQ_REGNO 64
+#define CR0_REGNO 68
+#define CR1_REGNO 69
+#define CR2_REGNO 70
+#define CR3_REGNO 71
+#define CR4_REGNO 72
+#define MAX_CR_REGNO 75
+#define XER_REGNO 76
+#define FIRST_ALTIVEC_REGNO 77
+#define LAST_ALTIVEC_REGNO 108
+/* 77..108 is inclusive at both ends: 32 AltiVec registers (v0-v31),
+ so the count needs the + 1. */
+#define TOTAL_ALTIVEC_REGS (LAST_ALTIVEC_REGNO - FIRST_ALTIVEC_REGNO + 1)
+#define VRSAVE_REGNO 109
+
+/* List the order in which to allocate registers. Each register must be
+ listed once, even those in FIXED_REGISTERS.
+
+ We allocate in the following order:
+ fp0 (not saved or used for anything)
+ fp13 - fp2 (not saved; incoming fp arg registers)
+ fp1 (not saved; return value)
+ fp31 - fp14 (saved; order given to save least number)
+ cr7, cr6 (not saved or special)
+ cr1 (not saved, but used for FP operations)
+ cr0 (not saved, but used for arithmetic operations)
+ cr4, cr3, cr2 (saved)
+ r0 (not saved; cannot be base reg)
+ r9 (not saved; best for TImode)
+ r11, r10, r8-r4 (not saved; highest used first to make less conflict)
+ r3 (not saved; return value register)
+ r31 - r13 (saved; order given to save least number)
+ r12 (not saved; if used for DImode or DFmode would use r13)
+ mq (not saved; best to use it if we can)
+ ctr (not saved; when we have the choice ctr is better)
+ lr (saved)
+ cr5, r1, r2, ap, xer, vrsave (fixed)
+
+ AltiVec registers:
+ v0 - v1 (not saved or used for anything)
+ v13 - v3 (not saved; incoming vector arg registers)
+ v2 (not saved; incoming vector arg reg; return value)
+ v19 - v14 (not saved or used for anything)
+ v31 - v20 (saved; order given to save least number)
+*/
+
+
+/* Entries are hard register numbers: GPR n = n, FPR n = 32 + n,
+ AltiVec vN = 77 + N (see the *_REGNO defines above). */
+#define REG_ALLOC_ORDER \
+ {32, \
+ 45, 44, 43, 42, 41, 40, 39, 38, 37, 36, 35, 34, \
+ 33, \
+ 63, 62, 61, 60, 59, 58, 57, 56, 55, 54, 53, 52, 51, \
+ 50, 49, 48, 47, 46, \
+ 75, 74, 69, 68, 72, 71, 70, \
+ 0, \
+ 9, 11, 10, 8, 7, 6, 5, 4, \
+ 3, \
+ 31, 30, 29, 28, 27, 26, 25, 24, 23, 22, 21, 20, 19, \
+ 18, 17, 16, 15, 14, 13, 12, \
+ 64, 66, 65, \
+ 73, 1, 2, 67, 76, \
+ /* AltiVec registers. */ \
+ 77, 78, \
+ 90, 89, 88, 87, 86, 85, 84, 83, 82, 81, 80, \
+ 79, \
+ 96, 95, 94, 93, 92, 91, \
+ 108, 107, 106, 105, 104, 103, 102, 101, 100, 99, 98, \
+ 97, 109 \
+}
+
+/* True if register is floating-point. */
+#define FP_REGNO_P(N) ((N) >= 32 && (N) <= 63)
+
+/* True if register is a condition register. */
+#define CR_REGNO_P(N) ((N) >= 68 && (N) <= 75)
+
+/* True if register is a condition register, but not cr0. */
+#define CR_REGNO_NOT_CR0_P(N) ((N) >= 69 && (N) <= 75)
+
+/* True if register is an integer register. */
+#define INT_REGNO_P(N) ((N) <= 31 || (N) == ARG_POINTER_REGNUM)
+
+/* True if register is the XER register. */
+#define XER_REGNO_P(N) ((N) == XER_REGNO)
+
+/* True if register is an AltiVec register. */
+#define ALTIVEC_REGNO_P(N) ((N) >= FIRST_ALTIVEC_REGNO && (N) <= LAST_ALTIVEC_REGNO)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ POWER and PowerPC GPRs hold 32 bits worth;
+ PowerPC64 GPRs and FPRs hold 64 bits worth. */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (FP_REGNO_P (REGNO) \
+ ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+ : ALTIVEC_REGNO_P (REGNO) \
+ ? ((GET_MODE_SIZE (MODE) + UNITS_PER_ALTIVEC_WORD - 1) / UNITS_PER_ALTIVEC_WORD) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* True if MODE is one of the four 128-bit AltiVec vector modes. */
+#define ALTIVEC_VECTOR_MODE(MODE) \
+ ((MODE) == V16QImode \
+ || (MODE) == V8HImode \
+ || (MODE) == V4SFmode \
+ || (MODE) == V4SImode)
+
+/* Define this macro to be nonzero if the port is prepared to handle
+ insns involving vector mode MODE. At the very least, it must have
+ move patterns for this mode. */
+
+#define VECTOR_MODE_SUPPORTED_P(MODE) \
+ (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (MODE))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ For POWER and PowerPC, the GPRs can hold any mode, but the float
+ registers can only hold floating modes and DImode, and CR registers
+ can only hold CC modes. The XER holds only PSImode. We cannot put
+ TImode anywhere except general register and it must be able to fit
+ within the register set. */
+
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ (FP_REGNO_P (REGNO) ? \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) == UNITS_PER_FP_WORD)) \
+ : ALTIVEC_REGNO_P (REGNO) ? ALTIVEC_VECTOR_MODE (MODE) \
+ : CR_REGNO_P (REGNO) ? GET_MODE_CLASS (MODE) == MODE_CC \
+ : XER_REGNO_P (REGNO) ? (MODE) == PSImode \
+ : ! INT_REGNO_P (REGNO) ? (GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) <= UNITS_PER_WORD) \
+ : 1)
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output.
+ Float, CC and AltiVec vector modes only tie with modes of their own
+ kind; everything else ties freely. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ (GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ ? GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ : GET_MODE_CLASS (MODE2) == MODE_FLOAT \
+ ? GET_MODE_CLASS (MODE1) == MODE_FLOAT \
+ : GET_MODE_CLASS (MODE1) == MODE_CC \
+ ? GET_MODE_CLASS (MODE2) == MODE_CC \
+ : GET_MODE_CLASS (MODE2) == MODE_CC \
+ ? GET_MODE_CLASS (MODE1) == MODE_CC \
+ : ALTIVEC_VECTOR_MODE (MODE1) \
+ ? ALTIVEC_VECTOR_MODE (MODE2) \
+ : ALTIVEC_VECTOR_MODE (MODE2) \
+ ? ALTIVEC_VECTOR_MODE (MODE1) \
+ : 1)
+
+/* A C expression returning the cost of moving data from a register of class
+ CLASS1 to one of CLASS2.
+
+ On the RS/6000, copying between floating-point and fixed-point
+ registers is expensive. */
+
+#define REGISTER_MOVE_COST(MODE, CLASS1, CLASS2) \
+ ((CLASS1) == FLOAT_REGS && (CLASS2) == FLOAT_REGS ? 2 \
+ : (CLASS1) == FLOAT_REGS && (CLASS2) != FLOAT_REGS ? 10 \
+ : (CLASS1) != FLOAT_REGS && (CLASS2) == FLOAT_REGS ? 10 \
+ : (CLASS1) == ALTIVEC_REGS && (CLASS2) != ALTIVEC_REGS ? 20 \
+ : (CLASS1) != ALTIVEC_REGS && (CLASS2) == ALTIVEC_REGS ? 20 \
+ : (((CLASS1) == SPECIAL_REGS || (CLASS1) == MQ_REGS \
+ || (CLASS1) == LINK_REGS || (CLASS1) == CTR_REGS \
+ || (CLASS1) == LINK_OR_CTR_REGS) \
+ && ((CLASS2) == SPECIAL_REGS || (CLASS2) == MQ_REGS \
+ || (CLASS2) == LINK_REGS || (CLASS2) == CTR_REGS \
+ || (CLASS2) == LINK_OR_CTR_REGS)) ? 10 \
+ : 2)
+
+/* A C expression returning the cost of moving data of MODE from a register to
+ or from memory.
+
+ On the RS/6000, bump this up a bit. */
+
+#define MEMORY_MOVE_COST(MODE, CLASS, IN) \
+ ((GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && (rs6000_cpu == PROCESSOR_RIOS1 || rs6000_cpu == PROCESSOR_PPC601) \
+ ? 3 : 2) \
+ + 4)
+
+/* Specify the cost of a branch insn; roughly the number of extra insns that
+ should be added to avoid a branch.
+
+ Set this to 3 on the RS/6000 since that is roughly the average cost of an
+ unscheduled conditional branch. */
+
+#define BRANCH_COST 3
+
+/* Define this macro to change register usage conditional on target flags.
+ Set MQ register fixed (already call_used) if not POWER architecture
+ (RIOS1, RIOS2, RSC, and PPC601) so that it will not be allocated.
+ 64-bit AIX reserves GPR13 for thread-private data.
+ Conditionally disable FPRs.
+ Also fix the PIC register for V.4/Darwin PIC code, fix all AltiVec
+ registers when AltiVec is disabled, and make the first 20 AltiVec
+ registers (v0-v19) call-clobbered under the AltiVec ABI. */
+
+#define CONDITIONAL_REGISTER_USAGE \
+{ \
+ int i; \
+ if (! TARGET_POWER) \
+ fixed_regs[64] = 1; \
+ if (TARGET_64BIT) \
+ fixed_regs[13] = call_used_regs[13] \
+ = call_really_used_regs[13] = 1; \
+ if (TARGET_SOFT_FLOAT) \
+ for (i = 32; i < 64; i++) \
+ fixed_regs[i] = call_used_regs[i] \
+ = call_really_used_regs[i] = 1; \
+ if (DEFAULT_ABI == ABI_V4 && flag_pic == 1) \
+ fixed_regs[PIC_OFFSET_TABLE_REGNUM] \
+ = call_used_regs[PIC_OFFSET_TABLE_REGNUM] \
+ = call_really_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ if (DEFAULT_ABI == ABI_DARWIN && flag_pic) \
+ global_regs[PIC_OFFSET_TABLE_REGNUM] \
+ = fixed_regs[PIC_OFFSET_TABLE_REGNUM] \
+ = call_used_regs[PIC_OFFSET_TABLE_REGNUM] \
+ = call_really_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; \
+ if (! TARGET_ALTIVEC) \
+ for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i) \
+ fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1; \
+ if (TARGET_ALTIVEC_ABI) \
+ for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i) \
+ call_used_regs[i] = call_really_used_regs[i] = 1; \
+}
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* RS/6000 pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments (r1). */
+#define STACK_POINTER_REGNUM 1
+
+/* Base register for access to local variables of the function (r31). */
+#define FRAME_POINTER_REGNUM 31
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c. */
+#define FRAME_POINTER_REQUIRED 0
+
+/* Base register for access to arguments of the function.
+ 67 is the pseudo AP register; it is fixed and eliminated in favor
+ of SP or FP (see the FIRST_PSEUDO_REGISTER comment above). */
+#define ARG_POINTER_REGNUM 67
+
+/* Place to put static chain when calling a function that requires it (r11). */
+#define STATIC_CHAIN_REGNUM 11
+
+/* Link register number. */
+#define LINK_REGISTER_REGNUM 65
+
+/* Count register number. */
+#define COUNT_REGISTER_REGNUM 66
+
+/* Place that structure value return address is placed.
+
+ On the RS/6000, it is passed as an extra parameter. */
+#define STRUCT_VALUE 0
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The RS/6000 has three types of registers, fixed-point, floating-point,
+ and condition registers, plus three special registers, MQ, CTR, and the
+ link register.
+
+ However, r0 is special in that it cannot be used as a base register.
+ So make a class for registers valid as base registers.
+
+ Also, cr0 is the only condition code register that can be used in
+ arithmetic insns, so make a separate class for it. */
+
+enum reg_class
+{
+ NO_REGS,
+ BASE_REGS,
+ GENERAL_REGS,
+ FLOAT_REGS,
+ ALTIVEC_REGS,
+ VRSAVE_REGS,
+ NON_SPECIAL_REGS,
+ MQ_REGS,
+ LINK_REGS,
+ CTR_REGS,
+ LINK_OR_CTR_REGS,
+ SPECIAL_REGS,
+ SPEC_OR_GEN_REGS,
+ CR0_REGS,
+ CR_REGS,
+ NON_FLOAT_REGS,
+ XER_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file.
+ This list must stay in the same order as enum reg_class above. */
+
+#define REG_CLASS_NAMES \
+{ \
+ "NO_REGS", \
+ "BASE_REGS", \
+ "GENERAL_REGS", \
+ "FLOAT_REGS", \
+ "ALTIVEC_REGS", \
+ "VRSAVE_REGS", \
+ "NON_SPECIAL_REGS", \
+ "MQ_REGS", \
+ "LINK_REGS", \
+ "CTR_REGS", \
+ "LINK_OR_CTR_REGS", \
+ "SPECIAL_REGS", \
+ "SPEC_OR_GEN_REGS", \
+ "CR0_REGS", \
+ "CR_REGS", \
+ "NON_FLOAT_REGS", \
+ "XER_REGS", \
+ "ALL_REGS" \
+}
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES.
+
+ Each set is four 32-bit words; word 0 covers regs 0-31 (GPRs),
+ word 1 regs 32-63 (FPRs), word 2 regs 64-95 (MQ, LR, CTR, AP,
+ CRs, XER and AltiVec 77-95), word 3 regs 96-109.
+
+ NOTE(review): NON_FLOAT_REGS' word 2 (0x0000efff) sets bits 13-15
+ (regs 77-79, i.e. the first three AltiVec regs) while clearing
+ bit 12 (XER) -- verify this membership is intended. */
+
+#define REG_CLASS_CONTENTS \
+{ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* NO_REGS */ \
+ { 0xfffffffe, 0x00000000, 0x00000008, 0x00000000 }, /* BASE_REGS */ \
+ { 0xffffffff, 0x00000000, 0x00000008, 0x00000000 }, /* GENERAL_REGS */ \
+ { 0x00000000, 0xffffffff, 0x00000000, 0x00000000 }, /* FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0xffffe000, 0x00001fff }, /* ALTIVEC_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000000, 0x00002000 }, /* VRSAVE_REGS */ \
+ { 0xffffffff, 0xffffffff, 0x00000008, 0x00000000 }, /* NON_SPECIAL_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000001, 0x00000000 }, /* MQ_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000002, 0x00000000 }, /* LINK_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000004, 0x00000000 }, /* CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000006, 0x00000000 }, /* LINK_OR_CTR_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000007, 0x00002000 }, /* SPECIAL_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000000f, 0x00000000 }, /* SPEC_OR_GEN_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000010, 0x00000000 }, /* CR0_REGS */ \
+ { 0x00000000, 0x00000000, 0x00000ff0, 0x00000000 }, /* CR_REGS */ \
+ { 0xffffffff, 0x00000000, 0x0000efff, 0x00000000 }, /* NON_FLOAT_REGS */ \
+ { 0x00000000, 0x00000000, 0x00001000, 0x00000000 }, /* XER_REGS */ \
+ { 0xffffffff, 0xffffffff, 0xffffffff, 0x00003fff } /* ALL_REGS */ \
+}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array.
+
+ r0 maps to GENERAL_REGS rather than BASE_REGS because it cannot
+ be used as a base register (see the reg_class comment above). */
+
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == 0 ? GENERAL_REGS \
+ : (REGNO) < 32 ? BASE_REGS \
+ : FP_REGNO_P (REGNO) ? FLOAT_REGS \
+ : ALTIVEC_REGNO_P (REGNO) ? ALTIVEC_REGS \
+ : (REGNO) == CR0_REGNO ? CR0_REGS \
+ : CR_REGNO_P (REGNO) ? CR_REGS \
+ : (REGNO) == MQ_REGNO ? MQ_REGS \
+ : (REGNO) == LINK_REGISTER_REGNUM ? LINK_REGS \
+ : (REGNO) == COUNT_REGISTER_REGNUM ? CTR_REGS \
+ : (REGNO) == ARG_POINTER_REGNUM ? BASE_REGS \
+ : (REGNO) == XER_REGNO ? XER_REGS \
+ : (REGNO) == VRSAVE_REGNO ? VRSAVE_REGS \
+ : NO_REGS)
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS BASE_REGS
+
+/* Get reg_class from a letter such as appears in the machine description. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+ ((C) == 'f' ? FLOAT_REGS \
+ : (C) == 'b' ? BASE_REGS \
+ : (C) == 'h' ? SPECIAL_REGS \
+ : (C) == 'q' ? MQ_REGS \
+ : (C) == 'c' ? CTR_REGS \
+ : (C) == 'l' ? LINK_REGS \
+ : (C) == 'v' ? ALTIVEC_REGS \
+ : (C) == 'x' ? CR0_REGS \
+ : (C) == 'y' ? CR_REGS \
+ : (C) == 'z' ? XER_REGS \
+ : NO_REGS)
+
+/* The letters I, J, K, L, M, N, O, and P in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ `I' is a signed 16-bit constant
+ `J' is a constant with only the high-order 16 bits non-zero
+ `K' is a constant with only the low-order 16 bits non-zero
+ `L' is a signed 16-bit constant shifted left 16 bits
+ `M' is a constant that is greater than 31
+ `N' is a positive constant that is an exact power of two
+ `O' is the constant zero
+ `P' is a constant whose negation is a signed 16-bit constant */
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ( (C) == 'I' ? (unsigned HOST_WIDE_INT) ((VALUE) + 0x8000) < 0x10000 \
+ : (C) == 'J' ? ((VALUE) & (~ (unsigned HOST_WIDE_INT) 0xffff0000)) == 0 \
+ : (C) == 'K' ? ((VALUE) & (~ (HOST_WIDE_INT) 0xffff)) == 0 \
+ : (C) == 'L' ? (((VALUE) & 0xffff) == 0 \
+ && ((VALUE) >> 31 == -1 || (VALUE) >> 31 == 0)) \
+ : (C) == 'M' ? (VALUE) > 31 \
+ : (C) == 'N' ? (VALUE) > 0 && exact_log2 (VALUE) >= 0 \
+ : (C) == 'O' ? (VALUE) == 0 \
+ : (C) == 'P' ? (unsigned HOST_WIDE_INT) ((- (VALUE)) + 0x8000) < 0x10000 \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself.
+
+ We flag for special constants when we can copy the constant into
+ a general register in two insns for DF/DI and one insn for SF.
+
+ 'H' is used for DI/DF constants that take 3 insns. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ( (C) == 'G' ? (num_insns_constant (VALUE, GET_MODE (VALUE)) \
+ == ((GET_MODE (VALUE) == SFmode) ? 1 : 2)) \
+ : (C) == 'H' ? (num_insns_constant (VALUE, GET_MODE (VALUE)) == 3) \
+ : 0)
+
+/* Optional extra constraints for this machine.
+
+ 'Q' means that OP is a memory operand that is just an offset from a reg.
+ 'R' is for AIX TOC entries.
+ 'S' is a constant that can be placed into a 64-bit mask operand
+ 'T' is a constant that can be placed into a 32-bit mask operand
+ 'U' is for V.4 small data references. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) == REG \
+ : (C) == 'R' ? LEGITIMATE_CONSTANT_POOL_ADDRESS_P (OP) \
+ : (C) == 'S' ? mask64_operand (OP, VOIDmode) \
+ : (C) == 'T' ? mask_operand (OP, VOIDmode) \
+ : (C) == 'U' ? (DEFAULT_ABI == ABI_V4 \
+ && small_data_operand (OP, GET_MODE (OP))) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class.
+
+ On the RS/6000, we have to return NO_REGS when we want to reload a
+ floating-point CONST_DOUBLE to force it to be copied to memory.
+
+ We also don't want to reload integer values into floating-point
+ registers if we can at all help it. In fact, this can
+ cause reload to abort, if it tries to generate a reload of CTR
+ into a FP register and discovers it doesn't have the memory location
+ required.
+
+ ??? Would it be a good idea to have reload do the converse, that is
+ try to reload floating modes into FP registers if possible?
+ */
+
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (((GET_CODE (X) == CONST_DOUBLE \
+ && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT) \
+ ? NO_REGS \
+ : (GET_MODE_CLASS (GET_MODE (X)) == MODE_INT \
+ && (CLASS) == NON_SPECIAL_REGS) \
+ ? GENERAL_REGS \
+ : (CLASS)))
+
+/* Return the register class of a scratch register needed to copy IN into
+ or out of a register in CLASS in MODE. If it can be done directly,
+ NO_REGS is returned. */
+
+#define SECONDARY_RELOAD_CLASS(CLASS,MODE,IN) \
+ secondary_reload_class (CLASS, MODE, IN)
+
+/* If we are copying between FP or AltiVec registers and anything
+ else, we need a memory location. */
+
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ ((CLASS1) != (CLASS2) && ((CLASS1) == FLOAT_REGS \
+ || (CLASS2) == FLOAT_REGS \
+ || (CLASS1) == ALTIVEC_REGS \
+ || (CLASS2) == ALTIVEC_REGS))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS.
+
+ On RS/6000, this is the size of MODE in words,
+ except in the FP regs, where a single reg is enough for two words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (((CLASS) == FLOAT_REGS) \
+ ? ((GET_MODE_SIZE (MODE) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
+
+/* If defined, gives a class of registers that cannot be used as the
+ operand of a SUBREG that changes the mode of the object illegally. */
+
+#define CLASS_CANNOT_CHANGE_MODE FLOAT_REGS
+
+/* Defines illegal mode changes for CLASS_CANNOT_CHANGE_MODE:
+ any size-changing subreg of a FLOAT_REGS register is disallowed. */
+
+#define CLASS_CANNOT_CHANGE_MODE_P(FROM,TO) \
+ (GET_MODE_SIZE (FROM) != GET_MODE_SIZE (TO))
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Enumeration to give which calling sequence to use. */
+enum rs6000_abi {
+ ABI_NONE,
+ ABI_AIX, /* IBM's AIX */
+ ABI_AIX_NODESC, /* AIX calling sequence minus
+ function descriptors */
+ ABI_V4, /* System V.4/eabi */
+ ABI_DARWIN /* Apple's Darwin (OS X kernel) */
+};
+
+extern enum rs6000_abi rs6000_current_abi; /* available for use by subtarget */
+
+/* Structure used to define the rs6000 stack */
+typedef struct rs6000_stack {
+ int first_gp_reg_save; /* first callee saved GP register used */
+ int first_fp_reg_save; /* first callee saved FP register used */
+ int first_altivec_reg_save; /* first callee saved AltiVec register used */
+ int lr_save_p; /* true if the link reg needs to be saved */
+ int cr_save_p; /* true if the CR reg needs to be saved */
+ unsigned int vrsave_mask; /* mask of vec registers to save */
+ int toc_save_p; /* true if the TOC needs to be saved */
+ int push_p; /* true if we need to allocate stack space */
+ int calls_p; /* true if the function makes any calls */
+ enum rs6000_abi abi; /* which ABI to use */
+ int gp_save_offset; /* offset to save GP regs from initial SP */
+ int fp_save_offset; /* offset to save FP regs from initial SP */
+ int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
+ int lr_save_offset; /* offset to save LR from initial SP */
+ int cr_save_offset; /* offset to save CR from initial SP */
+ int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
+ int toc_save_offset; /* offset to save the TOC pointer */
+ int varargs_save_offset; /* offset to save the varargs registers */
+ int ehrd_offset; /* offset to EH return data */
+ int reg_size; /* register size (4 or 8) */
+ int varargs_size; /* size to hold V.4 args passed in regs */
+ int vars_size; /* variable save area size */
+ int parm_size; /* outgoing parameter size */
+ int save_size; /* save area size */
+ int fixed_size; /* fixed size of stack frame */
+ int gp_size; /* size of saved GP registers */
+ int fp_size; /* size of saved FP registers */
+ int altivec_size; /* size of saved AltiVec registers */
+ int cr_size; /* size to hold CR if not in save_size */
+ int lr_size; /* size to hold LR if not in save_size */
+ int vrsave_size; /* size to hold VRSAVE if not in save_size */
+ int altivec_padding_size; /* size of altivec alignment padding if
+ not in save_size */
+ int toc_size; /* size to hold TOC if not in save_size */
+ int total_size; /* total bytes allocated for stack */
+} rs6000_stack_t;
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame.
+
+ On the RS/6000, we grow upwards, from the area after the outgoing
+ arguments. */
+/* #define FRAME_GROWS_DOWNWARD */
+
+/* Size of the outgoing register save area */
+#define RS6000_REG_SAVE ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_AIX_NODESC \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? (TARGET_64BIT ? 64 : 32) \
+ : 0)
+
+/* Size of the fixed area on the stack */
+#define RS6000_SAVE_AREA \
+ (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_AIX_NODESC || DEFAULT_ABI == ABI_DARWIN) ? 24 : 8) \
+ << (TARGET_64BIT ? 1 : 0))
+
+/* MEM representing address to save the TOC register */
+#define RS6000_SAVE_TOC gen_rtx_MEM (Pmode, \
+ plus_constant (stack_pointer_rtx, \
+ (TARGET_32BIT ? 20 : 40)))
+
+/* Size of the V.4 varargs area if needed */
+#define RS6000_VARARGS_AREA 0
+
+/* Align an address. A must be a power of two. */
+#define RS6000_ALIGN(n,a) (((n) + (a) - 1) & ~((a) - 1))
+
+/* Size of V.4 varargs area in bytes */
+#define RS6000_VARARGS_SIZE \
+ ((GP_ARG_NUM_REG * (TARGET_32BIT ? 4 : 8)) + (FP_ARG_NUM_REG * 8) + 8)
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated.
+
+ On the RS/6000, the frame pointer is the same as the stack pointer,
+ except for dynamic allocations. So we start after the fixed area and
+ outgoing parameter area. */
+
+#define STARTING_FRAME_OFFSET \
+ (RS6000_ALIGN (current_function_outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + RS6000_VARARGS_AREA \
+ + RS6000_SAVE_AREA)
+
+/* Offset from the stack pointer register to an item dynamically
+ allocated on the stack, e.g., by `alloca'.
+
+ The default value for this macro is `STACK_POINTER_OFFSET' plus the
+ length of the outgoing arguments. The default is correct for most
+ machines. See `function.c' for details. */
+#define STACK_DYNAMIC_OFFSET(FUNDECL) \
+ (RS6000_ALIGN (current_function_outgoing_args_size, \
+ TARGET_ALTIVEC ? 16 : 8) \
+ + (STACK_POINTER_OFFSET))
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On RS/6000, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Offset of first parameter from the argument pointer register value.
+ On the RS/6000, we define the argument pointer to the start of the fixed
+ area. */
+#define FIRST_PARM_OFFSET(FNDECL) RS6000_SAVE_AREA
+
+/* Offset from the argument pointer register value to the top of
+ stack. This is different from FIRST_PARM_OFFSET because of the
+ register save area. */
+#define ARG_POINTER_CFA_OFFSET(FNDECL) 0
+
+/* Define this if stack space is still allocated for a parameter passed
+ in a register. The value is the number of bytes allocated to this
+ area. */
+#define REG_PARM_STACK_SPACE(FNDECL) RS6000_REG_SAVE
+
+/* Define this if the above stack space is to be considered part of the
+ space allocated by the caller. (Defined with no value; only its
+ presence matters.) */
+#define OUTGOING_REG_PARM_STACK_SPACE
+
+/* This is the difference between the logical top of stack and the actual sp.
+
+ For the RS/6000, sp points past the fixed area. */
+#define STACK_POINTER_OFFSET RS6000_SAVE_AREA
+
+/* Define this if the maximum size of all the outgoing args is to be
+ accumulated and pushed during the prologue. The amount can be
+ found in the variable current_function_outgoing_args_size. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0.
+
+ On RS/6000 an integer value is in r3 and a floating-point value is in
+ fp1, unless -msoft-float. Integral values narrower than a word (and
+ pointers) are returned in word_mode. */
+
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ gen_rtx_REG ((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ || POINTER_TYPE_P (VALTYPE) \
+ ? word_mode : TYPE_MODE (VALTYPE), \
+ TREE_CODE (VALTYPE) == VECTOR_TYPE ? ALTIVEC_ARG_RETURN \
+ : TREE_CODE (VALTYPE) == REAL_TYPE && TARGET_HARD_FLOAT \
+ ? FP_ARG_RETURN : GP_ARG_RETURN)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ gen_rtx_REG (MODE, ALTIVEC_VECTOR_MODE (MODE) ? ALTIVEC_ARG_RETURN \
+ : GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && TARGET_HARD_FLOAT \
+ ? FP_ARG_RETURN : GP_ARG_RETURN)
+
+/* The AIX ABI for the RS/6000 specifies that all structures are
+ returned in memory. The Darwin ABI does the same. The SVR4 ABI
+ specifies that structures <= 8 bytes are returned in r3/r4, but a
+ draft put them in memory, and GCC used to implement the draft
+ instead of the final standard. Therefore, TARGET_AIX_STRUCT_RET
+ controls this instead of DEFAULT_ABI; V.4 targets needing backward
+ compatibility can change DRAFT_V4_STRUCT_RET to override the
+ default, and -m switches get the final word. See
+ rs6000_override_options for more details.
+
+ int_size_in_bytes returns -1 for variable size objects, which go in
+ memory always. The cast to unsigned makes -1 > 8. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+ (AGGREGATE_TYPE_P (TYPE) && \
+ (TARGET_AIX_STRUCT_RET || \
+ (unsigned HOST_WIDEST_INT) int_size_in_bytes (TYPE) > 8))
+
+/* DRAFT_V4_STRUCT_RET defaults off. */
+#define DRAFT_V4_STRUCT_RET 0
+
+/* Let RETURN_IN_MEMORY control what happens. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Mode of stack savearea.
+ FUNCTION is VOIDmode because calling convention maintains SP.
+ BLOCK needs Pmode for SP.
+ NONLOCAL needs twice Pmode to maintain both backchain and SP. */
+#define STACK_SAVEAREA_MODE(LEVEL) \
+ (LEVEL == SAVE_FUNCTION ? VOIDmode \
+ : LEVEL == SAVE_NONLOCAL ? (TARGET_32BIT ? DImode : TImode) : Pmode)
+
+/* Minimum and maximum general purpose registers used to hold arguments. */
+#define GP_ARG_MIN_REG 3
+#define GP_ARG_MAX_REG 10
+#define GP_ARG_NUM_REG (GP_ARG_MAX_REG - GP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum floating point registers used to hold arguments. */
+#define FP_ARG_MIN_REG 33
+#define FP_ARG_AIX_MAX_REG 45
+#define FP_ARG_V4_MAX_REG 40
+#define FP_ARG_MAX_REG ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_AIX_NODESC \
+ || DEFAULT_ABI == ABI_DARWIN) \
+ ? FP_ARG_AIX_MAX_REG : FP_ARG_V4_MAX_REG)
+#define FP_ARG_NUM_REG (FP_ARG_MAX_REG - FP_ARG_MIN_REG + 1)
+
+/* Minimum and maximum AltiVec registers used to hold arguments. */
+#define ALTIVEC_ARG_MIN_REG (FIRST_ALTIVEC_REGNO + 2)
+#define ALTIVEC_ARG_MAX_REG (ALTIVEC_ARG_MIN_REG + 11)
+#define ALTIVEC_ARG_NUM_REG (ALTIVEC_ARG_MAX_REG - ALTIVEC_ARG_MIN_REG + 1)
+
+/* Return registers */
+#define GP_ARG_RETURN GP_ARG_MIN_REG
+#define FP_ARG_RETURN FP_ARG_MIN_REG
+#define ALTIVEC_ARG_RETURN (FIRST_ALTIVEC_REGNO + 2)
+
+/* Flags for the call/call_value rtl operations set up by function_arg */
+#define CALL_NORMAL 0x00000000 /* no special processing */
+/* Bits in 0x00000001 are unused. */
+#define CALL_V4_CLEAR_FP_ARGS 0x00000002 /* V.4, no FP args passed */
+#define CALL_V4_SET_FP_ARGS 0x00000004 /* V.4, FP args were passed */
+#define CALL_LONG 0x00000008 /* always call indirect */
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+
+ On RS/6000, this is r3, fp1, and v2 (for AltiVec). */
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_ARG_RETURN \
+ || ((N) == FP_ARG_RETURN) \
+ || (TARGET_ALTIVEC && \
+ (N) == ALTIVEC_ARG_RETURN))
+
+/* 1 if N is a possible register number for function argument passing.
+ On RS/6000, these are r3-r10 and fp1-fp13.
+ On AltiVec, v2 - v13 are used for passing vectors.
+
+ Each term casts the register offset to unsigned so that a regno below
+ the range wraps around and fails the single comparison.  The previous
+ version cast the *result* of the GP comparison instead of the
+ difference; it only worked because the unsigned bound forced the usual
+ arithmetic conversions to do the cast anyway.  */
+#define FUNCTION_ARG_REGNO_P(N) \
+ ((unsigned) ((N) - GP_ARG_MIN_REG) < (unsigned) GP_ARG_NUM_REG \
+ || (TARGET_ALTIVEC && \
+ (unsigned) ((N) - ALTIVEC_ARG_MIN_REG) < (unsigned) ALTIVEC_ARG_NUM_REG) \
+ || (unsigned) ((N) - FP_ARG_MIN_REG) < (unsigned) FP_ARG_NUM_REG)
+
+
+/* A C structure for machine-specific, per-function data.
+ This is added to the cfun structure. */
+typedef struct machine_function
+{
+ /* Whether a System V.4 varargs area was created. */
+ int sysv_varargs_p;
+ /* Flags if __builtin_return_address (n) with n >= 1 was used. */
+ int ra_needs_full_frame;
+} machine_function;
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On the RS/6000, this is a structure. The first element is the number of
+ total argument words, the second is used to store the next
+ floating-point register number, and the third says how many more args we
+ have prototype types for.
+
+ For ABI_V4, we treat these slightly differently -- `sysv_gregno' is
+ the next availible GP register, `fregno' is the next available FP
+ register, and `words' is the number of words used on the stack.
+
+ The varargs/stdarg support requires that this structure's size
+ be a multiple of sizeof(int). */
+
+typedef struct rs6000_args
+{
+ int words; /* # words used for passing GP registers */
+ int fregno; /* next available FP register */
+ int vregno; /* next available AltiVec register */
+ int nargs_prototype; /* # args left in the current prototype */
+ int orig_nargs; /* Original value of nargs_prototype */
+ int prototype; /* Whether a prototype was defined */
+ int call_cookie; /* Do special things for this call */
+ int sysv_gregno; /* next available GP register */
+} CUMULATIVE_ARGS;
+
+/* Define intermediate macro to compute the size (in registers) of an argument
+ for the RS/6000. */
+
+/* NOTE(review): int_size_in_bytes returns -1 for variable-sized TYPEs;
+   the unsigned cast would then yield a huge register count, so such
+   types are presumably rejected before reaching here -- confirm in
+   callers.  */
+#define RS6000_ARG_SIZE(MODE, TYPE) \
+((MODE) != BLKmode \
+ ? (GET_MODE_SIZE (MODE) + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD \
+ : ((unsigned HOST_WIDE_INT) int_size_in_bytes (TYPE) \
+ + (UNITS_PER_WORD - 1)) / UNITS_PER_WORD)
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+/* The INDIRECT parameter is ignored; the final argument to
+   init_cumulative_args only distinguishes an incoming (TRUE, below)
+   from an outgoing (FALSE, here) argument scan.  */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, FALSE)
+
+/* Similar, but when scanning the definition of a procedure. We always
+ set NARGS_PROTOTYPE large so we never return an EXPR_LIST. */
+
+#define INIT_CUMULATIVE_INCOMING_ARGS(CUM,FNTYPE,LIBNAME) \
+ init_cumulative_args (&CUM, FNTYPE, LIBNAME, TRUE)
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_advance (&CUM, MODE, TYPE, NAMED)
+
+/* Non-zero if we can use a floating-point register to pass this arg. */
+#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE) \
+ (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ && (CUM).fregno <= FP_ARG_MAX_REG \
+ && TARGET_HARD_FLOAT)
+
+/* Non-zero if we can use an AltiVec register to pass this arg. */
+#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE) \
+ (ALTIVEC_VECTOR_MODE (MODE) \
+ && (CUM).vregno <= ALTIVEC_ARG_MAX_REG \
+ && TARGET_ALTIVEC_ABI)
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On RS/6000 the first eight words of non-FP are normally in registers
+ and the rest are pushed. The first 13 FP args are in registers.
+
+ If this is floating-point and no prototype is specified, we use
+ both an FP and integer register (or possibly FP reg and stack). Library
+ functions (when TYPE is zero) always have the proper types for args,
+ so we can pass the FP value just in one register. emit_library_function
+ doesn't support EXPR_LIST anyway. */
+
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+ function_arg (&CUM, MODE, TYPE, NAMED)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+ function_arg_partial_nregs (&CUM, MODE, TYPE, NAMED)
+
+/* A C expression that indicates when an argument must be passed by
+ reference. If nonzero for an argument, a copy of that argument is
+ made in memory and a pointer to the argument is passed instead of
+ the argument itself. The pointer is passed in whatever way is
+ appropriate for passing a pointer to that type. */
+
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+ function_arg_pass_by_reference(&CUM, MODE, TYPE, NAMED)
+
+/* If defined, a C expression which determines whether, and in which
+ direction, to pad out an argument with extra space. The value
+ should be of type `enum direction': either `upward' to pad above
+ the argument, `downward' to pad below, or `none' to inhibit
+ padding. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) function_arg_padding (MODE, TYPE)
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+ function_arg_boundary (MODE, TYPE)
+
+/* Perform any needed actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as above.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed. */
+
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+ setup_incoming_varargs (&CUM, MODE, TYPE, &PRETEND_SIZE, NO_RTL)
+
+/* Define the `__builtin_va_list' type for the ABI. */
+#define BUILD_VA_LIST_TYPE(VALIST) \
+ (VALIST) = rs6000_build_va_list ()
+
+/* Implement `va_start' for varargs and stdarg. */
+#define EXPAND_BUILTIN_VA_START(stdarg, valist, nextarg) \
+ rs6000_va_start (stdarg, valist, nextarg)
+
+/* Implement `va_arg'. */
+#define EXPAND_BUILTIN_VA_ARG(valist, type) \
+ rs6000_va_arg (valist, type)
+
+/* Define this macro to be a nonzero value if the location where a function
+ argument is passed depends on whether or not it is a named argument. */
+#define STRICT_ARGUMENT_NAMING 1
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+/* NOTE(review): the expansion ends in a semicolon, so the conventional
+   use "FUNCTION_PROFILER (FILE, LABELNO);" expands to an extra empty
+   statement -- harmless in statement context, but worth tidying.  */
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ output_function_profiler ((FILE), (LABELNO));
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. No definition is equivalent to
+ always zero.
+
+ On the RS/6000, this is non-zero because we can restore the stack from
+ its backpointer, which we maintain. */
+#define EXIT_IGNORE_STACK 1
+
+/* Define this macro as a C expression that is nonzero for registers
+ that are used by the epilogue or the return' pattern. The stack
+ and frame pointer registers are already be assumed to be used as
+ needed. */
+
+/* LR counts only after reload; VRSAVE is always treated as used; the
+   TOC register is live at function end when __builtin_eh_return is
+   used on AIX.  */
+#define EPILOGUE_USES(REGNO) \
+ ((reload_completed && (REGNO) == LINK_REGISTER_REGNUM) \
+ || (REGNO) == VRSAVE_REGNO \
+ || (current_function_calls_eh_return \
+ && TARGET_AIX \
+ && (REGNO) == TOC_REGISTER))
+
+
+/* TRAMPOLINE_TEMPLATE deleted */
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE rs6000_trampoline_size ()
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#define INITIALIZE_TRAMPOLINE(ADDR, FNADDR, CXT) \
+ rs6000_initialize_trampoline (ADDR, FNADDR, CXT)
+
+/* Definitions for __builtin_return_address and __builtin_frame_address.
+ __builtin_return_address (0) should give link register (65), enable
+ this. */
+/* This should be uncommented, so that the link register is used, but
+ currently this would result in unmatched insns and spilling fixed
+ registers so we'll leave it for another day. When these problems are
+ taken care of one additional fetch will be necessary in RETURN_ADDR_RTX.
+ (mrs) */
+/* #define RETURN_ADDR_IN_PREVIOUS_FRAME */
+
+/* Number of bytes into the frame return addresses can be found. See
+ rs6000_stack_info in rs6000.c for more information on how the different
+ abi's store the return address. */
+#define RETURN_ADDRESS_OFFSET \
+ ((DEFAULT_ABI == ABI_AIX \
+ || DEFAULT_ABI == ABI_DARWIN \
+ || DEFAULT_ABI == ABI_AIX_NODESC) ? (TARGET_32BIT ? 8 : 16) : \
+ (DEFAULT_ABI == ABI_V4) ? 4 : \
+ (internal_error ("RETURN_ADDRESS_OFFSET not supported"), 0))
+
+/* The current return address is in link register (65). The return address
+ of anything farther back is accessed normally at an offset of 8 from the
+ frame pointer. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ (rs6000_return_addr (COUNT, FRAME))
+
+
+/* Definitions for register eliminations.
+
+ We have two registers that can be eliminated on the RS/6000. First, the
+ frame pointer register can often be eliminated in favor of the stack
+ pointer register. Secondly, the argument pointer register can always be
+ eliminated; it is replaced with either the stack or frame pointer.
+
+ In addition, we use the elimination mechanism to see if r30 is needed
+ Initially we assume that it isn't. If it is, we spill it. This is done
+ by making it an eliminable register. We replace it with itself so that
+ if it isn't needed, then existing uses won't be modified. */
+
+/* This is an array of structures. Each structure initializes one pair
+ of eliminable registers. The "from" register number is given first,
+ followed by "to". Eliminations of the same "from" register are listed
+ in order of preference. */
+#define ELIMINABLE_REGS \
+{{ FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ { 30, 30} }
+
+/* Given FROM and TO register numbers, say whether this elimination is allowed.
+ Frame pointer elimination is automatically handled.
+
+ For the RS/6000, if frame pointer elimination is being done, we would like
+ to convert ap into fp, not sp.
+
+ We need r30 if -mminimal-toc was specified, and there are constant pool
+ references. */
+
+#define CAN_ELIMINATE(FROM, TO) \
+ ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM \
+ ? ! frame_pointer_needed \
+ : (FROM) == 30 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0 \
+ : 1)
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+/* Offsets are derived from rs6000_stack_info; any elimination pair not
+   listed in ELIMINABLE_REGS above aborts.  */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ \
+ rs6000_stack_t *info = rs6000_stack_info (); \
+ \
+ if ((FROM) == FRAME_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (info->push_p) ? 0 : - info->total_size; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = info->total_size; \
+ else if ((FROM) == ARG_POINTER_REGNUM && (TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) = (info->push_p) ? info->total_size : 0; \
+ else if ((FROM) == 30) \
+ (OFFSET) = 0; \
+ else \
+ abort (); \
+}
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT 0 */
+/* #define HAVE_POST_DECREMENT 0 */
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+/* Hard regs 0-31 are the GPRs; regno 67 is presumably the argument
+   pointer (compare the explicit ARG_POINTER_REGNUM test in
+   INT_REG_OK_FOR_INDEX_P below) -- confirm against the register
+   definitions earlier in this file.  */
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? (REGNO) <= 31 || (REGNO) == 67 \
+ : (reg_renumber[REGNO] >= 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67)))
+
+/* Unlike an index, a base register must not be r0: in PowerPC address
+   computation an RA field of 0 means the literal value zero, not r0.  */
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < FIRST_PSEUDO_REGISTER \
+ ? ((REGNO) > 0 && (REGNO) <= 31) || (REGNO) == 67 \
+ : (reg_renumber[REGNO] > 0 \
+ && (reg_renumber[REGNO] <= 31 || reg_renumber[REGNO] == 67)))
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST \
+ || GET_CODE (X) == HIGH)
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ It is given that X satisfies CONSTANT_P or is a CONST_DOUBLE.
+
+ On the RS/6000, all integer constants are acceptable, most won't be valid
+ for particular insns, though. Only easy FP constants are
+ acceptable. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) != CONST_DOUBLE || GET_MODE (X) == VOIDmode \
+ || (TARGET_POWERPC64 && GET_MODE (X) == DImode) \
+ || easy_fp_constant (X, GET_MODE (X)))
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+#ifdef REG_OK_STRICT
+# define REG_OK_STRICT_FLAG 1
+#else
+# define REG_OK_STRICT_FLAG 0
+#endif
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_INDEX_P(X, STRICT) \
+ ((! (STRICT) \
+ && (REGNO (X) <= 31 \
+ || REGNO (X) == ARG_POINTER_REGNUM \
+ || REGNO (X) >= FIRST_PSEUDO_REGISTER)) \
+ || ((STRICT) && REGNO_OK_FOR_INDEX_P (REGNO (X))))
+
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg in the non-strict case. */
+#define INT_REG_OK_FOR_BASE_P(X, STRICT) \
+ (REGNO (X) > 0 && INT_REG_OK_FOR_INDEX_P (X, (STRICT)))
+
+#define REG_OK_FOR_INDEX_P(X) INT_REG_OK_FOR_INDEX_P (X, REG_OK_STRICT_FLAG)
+#define REG_OK_FOR_BASE_P(X) INT_REG_OK_FOR_BASE_P (X, REG_OK_STRICT_FLAG)
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On the RS/6000, there are four valid address: a SYMBOL_REF that
+ refers to a constant pool entry of an address (or the sum of it
+ plus a constant), a short (16-bit signed) constant plus a register,
+ the sum of two registers, or a register indirect, possibly with an
+ auto-increment. For DFmode and DImode with an constant plus register,
+ we must ensure that both words are addressable or PowerPC64 with offset
+ word aligned.
+
+ For modes spanning multiple registers (DFmode in 32-bit GPRs,
+ 32-bit DImode, TImode), indexed addressing cannot be used because
+ adjacent memory cells are accessed by adding word-sized offsets
+ during assembly output. */
+
+#define CONSTANT_POOL_EXPR_P(X) (constant_pool_expr_p (X))
+
+#define TOC_RELATIVE_EXPR_P(X) (toc_relative_expr_p (X))
+
+#define LEGITIMATE_CONSTANT_POOL_ADDRESS_P(X) \
+ (TARGET_TOC \
+ && GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && (TARGET_MINIMAL_TOC || REGNO (XEXP (X, 0)) == TOC_REGISTER) \
+ && CONSTANT_POOL_EXPR_P (XEXP (X, 1)))
+
+#define LEGITIMATE_SMALL_DATA_P(MODE, X) \
+ (DEFAULT_ABI == ABI_V4 \
+ && !flag_pic && !TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == CONST) \
+ && small_data_operand (X, MODE))
+
+/* True if X is a CONST_INT whose value plus OFFSET still fits in a
+   signed 16-bit displacement field.  */
+#define LEGITIMATE_ADDRESS_INTEGER_P(X, OFFSET) \
+ (GET_CODE (X) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) (INTVAL (X) + (OFFSET) + 0x8000) < 0x10000)
+
+/* Offset (D-form) addressing: base register plus 16-bit signed
+   displacement.  AltiVec loads/stores have no displacement form, so
+   for AltiVec modes only a zero offset is acceptable.
+   Fix: the previous version tested INTVAL (X), but X is the PLUS
+   itself and INTVAL is only meaningful on a CONST_INT; the
+   displacement is XEXP (X, 1), which LEGITIMATE_ADDRESS_INTEGER_P has
+   already verified to be a CONST_INT.  DFmode/DImode (and TImode)
+   additionally require every word of the access to stay addressable,
+   or a word-aligned offset on PowerPC64.  */
+#define LEGITIMATE_OFFSET_ADDRESS_P(MODE, X, STRICT) \
+ (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && INT_REG_OK_FOR_BASE_P (XEXP (X, 0), (STRICT)) \
+ && LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 0) \
+ && (! ALTIVEC_VECTOR_MODE (MODE) || INTVAL (XEXP (X, 1)) == 0) \
+ && (((MODE) != DFmode && (MODE) != DImode) \
+ || (TARGET_32BIT \
+ ? LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 4) \
+ : ! (INTVAL (XEXP (X, 1)) & 3))) \
+ && ((MODE) != TImode \
+ || (TARGET_32BIT \
+ ? LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 12) \
+ : (LEGITIMATE_ADDRESS_INTEGER_P (XEXP (X, 1), 8) \
+ && ! (INTVAL (XEXP (X, 1)) & 3)))))
+
+#define LEGITIMATE_INDEXED_ADDRESS_P(X, STRICT) \
+ (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && GET_CODE (XEXP (X, 1)) == REG \
+ && ((INT_REG_OK_FOR_BASE_P (XEXP (X, 0), (STRICT)) \
+ && INT_REG_OK_FOR_INDEX_P (XEXP (X, 1), (STRICT))) \
+ || (INT_REG_OK_FOR_BASE_P (XEXP (X, 1), (STRICT)) \
+ && INT_REG_OK_FOR_INDEX_P (XEXP (X, 0), (STRICT)))))
+
+#define LEGITIMATE_INDIRECT_ADDRESS_P(X, STRICT) \
+ (GET_CODE (X) == REG && INT_REG_OK_FOR_BASE_P (X, (STRICT)))
+
+/* LO_SUM addresses are only accepted on ELF targets when neither PIC
+   nor a TOC is in use, and only for single-register-sized scalars
+   (or DFmode when hard float is available).  */
+#define LEGITIMATE_LO_SUM_ADDRESS_P(MODE, X, STRICT) \
+ (TARGET_ELF \
+ && ! flag_pic && ! TARGET_TOC \
+ && GET_MODE_NUNITS (MODE) == 1 \
+ && (GET_MODE_BITSIZE (MODE) <= 32 \
+ || (TARGET_HARD_FLOAT && (MODE) == DFmode)) \
+ && GET_CODE (X) == LO_SUM \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && INT_REG_OK_FOR_BASE_P (XEXP (X, 0), (STRICT)) \
+ && CONSTANT_P (XEXP (X, 1)))
+
+/* The real work is done in rs6000.c:rs6000_legitimate_address.  */
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (rs6000_legitimate_address (MODE, X, REG_OK_STRICT_FLAG)) \
+ goto ADDR; \
+}
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output.
+
+ On RS/6000, first check for the sum of a register with a constant
+ integer that is out of range. If so, generate code to add the
+ constant with the low-order 16 bits masked to the register and force
+ this result into another register (this can be done with `cau').
+ Then generate an address of REG+(CONST&0xffff), allowing for the
+ possibility of bit 16 being a one.
+
+ Then check for the sum of a register and something not constant, try to
+ load the other things into a register and return the sum. */
+
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ rtx result = rs6000_legitimize_address (X, OLDX, MODE); \
+ if (result != NULL_RTX) \
+ { \
+ (X) = result; \
+ goto WIN; \
+ } \
+}
+
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ Implemented on rs6000 by rs6000_legitimize_reload_address.
+ Note that (X) is evaluated twice; this is safe in current usage. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ int win; \
+ (X) = rs6000_legitimize_reload_address ((X), (MODE), (OPNUM), \
+ (int)(TYPE), (IND_LEVELS), &win); \
+ if ( win ) \
+ goto WIN; \
+} while (0)
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+
+ On the RS/6000 this is true if the address is valid with a zero offset
+ but not with an offset of four (this means it cannot be used as an
+ address for DImode or DFmode) or is a pre-increment or decrement. Since
+ we know it is valid, we just check for an address that is not valid with
+ an offset of four. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL) \
+{ if (GET_CODE (ADDR) == PLUS \
+ && LEGITIMATE_ADDRESS_INTEGER_P (XEXP (ADDR, 1), 0) \
+ && ! LEGITIMATE_ADDRESS_INTEGER_P (XEXP (ADDR, 1), \
+ (TARGET_32BIT ? 4 : 8))) \
+ goto LABEL; \
+ if (TARGET_UPDATE && GET_CODE (ADDR) == PRE_INC) \
+ goto LABEL; \
+ if (TARGET_UPDATE && GET_CODE (ADDR) == PRE_DEC) \
+ goto LABEL; \
+ if (GET_CODE (ADDR) == LO_SUM) \
+ goto LABEL; \
+}
+
+/* The register number of the register used to address a table of
+ static data addresses in memory. In some cases this register is
+ defined by a processor's "application binary interface" (ABI).
+ When this macro is defined, RTL is generated for this register
+ once, as with the stack pointer and frame pointer registers. If
+ this macro is not defined, it is up to the machine-dependent files
+ to allocate such a register (if necessary). */
+
+#define PIC_OFFSET_TABLE_REGNUM 30
+
+/* With -mminimal-toc the TOC is addressed through r30 (the PIC offset
+   table register above); otherwise r2 is the dedicated TOC pointer.  */
+#define TOC_REGISTER (TARGET_MINIMAL_TOC ? 30 : 2)
+
+/* Define this macro if the register defined by
+ `PIC_OFFSET_TABLE_REGNUM' is clobbered by calls. Do not define
+ this macro if `PIC_OFFSET_TABLE_REGNUM' is not defined. */
+
+/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */
+
+/* By generating position-independent code, when two different
+ programs (A and B) share a common library (libC.a), the text of
+ the library can be shared whether or not the library is linked at
+ the same address for both programs. In some of these
+ environments, position-independent code requires not only the use
+ of different addressing modes, but also special code to enable the
+ use of these addressing modes.
+
+ The `FINALIZE_PIC' macro serves as a hook to emit these special
+ codes once the function is being compiled into assembly code, but
+ not before. (It is not done before, because in the case of
+ compiling an inline function, it would lead to multiple PIC
+ prologues being included in functions which used inline functions
+ and were compiled to assembly language.) */
+
+/* #define FINALIZE_PIC */
+
+/* A C expression that is nonzero if X is a legitimate immediate
+ operand on the target machine when generating position independent
+ code. You can assume that X satisfies `CONSTANT_P', so you need
+ not check this. You can also assume FLAG_PIC is true, so you need
+ not check it either. You need not define this macro if all
+ constants (including `SYMBOL_REF') can be immediate operands when
+ generating position independent code. */
+
+/* #define LEGITIMATE_PIC_OPERAND_P (X) */
+
+/* In rare cases, correct code generation requires extra machine
+ dependent processing between the second jump optimization pass and
+ delayed branch scheduling. On those machines, define this macro
+ as a C statement to act on the code starting at INSN. */
+
+/* #define MACHINE_DEPENDENT_REORG(INSN) */
+
+
+/* Define this if some processing needs to be done immediately before
+ emitting code for an insn. */
+
+/* #define FINAL_PRESCAN_INSN(INSN,OPERANDS,NOPERANDS) */
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE SImode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+#define CASE_VECTOR_PC_RELATIVE 1
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 0
+
+/* This flag, if defined, says the same insns that convert to a signed fixnum
+ also convert validly to an unsigned one. */
+
+/* #define FIXUNS_TRUNC_LIKE_FIX_TRUNC */
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+/* 4-byte moves on 32-bit implementations, 8-byte on PowerPC64.  */
+#define MOVE_MAX (! TARGET_POWERPC64 ? 4 : 8)
+#define MAX_MOVE_MAX 8
+
+/* Nonzero if access to memory by bytes is no faster than for words.
+ Also non-zero if doing byte operations (specifically shifts) in registers
+ is undesirable. */
+#define SLOW_BYTE_ACCESS 1
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Define if loading short immediate values into registers sign extends. */
+#define SHORT_IMMEDIATES_SIGN_EXTEND
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_32BIT ? SImode : DImode)
+
+/* Mode of a function address in a call instruction (for indexing purposes).
+ Doesn't matter on RS/6000. */
+#define FUNCTION_MODE (TARGET_32BIT ? SImode : DImode)
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits.
+
+ The sle and sre instructions which allow SHIFT_COUNT_TRUNCATED
+ have been dropped from the PowerPC architecture. */
+
+#define SHIFT_COUNT_TRUNCATED (TARGET_POWER ? 1 : 0)
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch.
+
+ On the RS/6000, if it is valid in the insn, it is free. So this
+ always returns 0. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ case CONST_DOUBLE: \
+ case HIGH: \
+ return 0;
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. */
+
+/* NOTE(review): the inner switches on rs6000_cpu have no default; a
+   processor value not listed under MULT falls through into the
+   DIV/MOD cases, and one not listed under UDIV/UMOD falls into the
+   FFS cost below -- confirm every PROCESSOR_* value is covered.  */
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case PLUS: \
+ return ((GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (X, 1)) \
+ + 0x8000) >= 0x10000) \
+ && ((INTVAL (XEXP (X, 1)) & 0xffff) != 0)) \
+ ? COSTS_N_INSNS (2) \
+ : COSTS_N_INSNS (1)); \
+ case AND: \
+ case IOR: \
+ case XOR: \
+ return ((GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && (INTVAL (XEXP (X, 1)) & (~ (HOST_WIDE_INT) 0xffff)) != 0 \
+ && ((INTVAL (XEXP (X, 1)) & 0xffff) != 0)) \
+ ? COSTS_N_INSNS (2) \
+ : COSTS_N_INSNS (1)); \
+ case MULT: \
+ switch (rs6000_cpu) \
+ { \
+ case PROCESSOR_RIOS1: \
+ case PROCESSOR_PPC405: \
+ return (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ ? COSTS_N_INSNS (5) \
+ : INTVAL (XEXP (X, 1)) >= -256 && INTVAL (XEXP (X, 1)) <= 255 \
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4)); \
+ case PROCESSOR_RS64A: \
+ return (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ ? GET_MODE (XEXP (X, 1)) != DImode \
+ ? COSTS_N_INSNS (20) : COSTS_N_INSNS (34) \
+ : INTVAL (XEXP (X, 1)) >= -256 && INTVAL (XEXP (X, 1)) <= 255 \
+ ? COSTS_N_INSNS (8) : COSTS_N_INSNS (12)); \
+ case PROCESSOR_RIOS2: \
+ case PROCESSOR_MPCCORE: \
+ case PROCESSOR_PPC604e: \
+ return COSTS_N_INSNS (2); \
+ case PROCESSOR_PPC601: \
+ return COSTS_N_INSNS (5); \
+ case PROCESSOR_PPC603: \
+ case PROCESSOR_PPC7400: \
+ case PROCESSOR_PPC750: \
+ return (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ ? COSTS_N_INSNS (5) \
+ : INTVAL (XEXP (X, 1)) >= -256 && INTVAL (XEXP (X, 1)) <= 255 \
+ ? COSTS_N_INSNS (2) : COSTS_N_INSNS (3)); \
+ case PROCESSOR_PPC7450: \
+ return (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ ? COSTS_N_INSNS (4) \
+ : COSTS_N_INSNS (3)); \
+ case PROCESSOR_PPC403: \
+ case PROCESSOR_PPC604: \
+ return COSTS_N_INSNS (4); \
+ case PROCESSOR_PPC620: \
+ case PROCESSOR_PPC630: \
+ return (GET_CODE (XEXP (X, 1)) != CONST_INT \
+ ? GET_MODE (XEXP (X, 1)) != DImode \
+ ? COSTS_N_INSNS (5) : COSTS_N_INSNS (7) \
+ : INTVAL (XEXP (X, 1)) >= -256 && INTVAL (XEXP (X, 1)) <= 255 \
+ ? COSTS_N_INSNS (3) : COSTS_N_INSNS (4)); \
+ } \
+ case DIV: \
+ case MOD: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && exact_log2 (INTVAL (XEXP (X, 1))) >= 0) \
+ return COSTS_N_INSNS (2); \
+ /* otherwise fall through to normal divide. */ \
+ case UDIV: \
+ case UMOD: \
+ switch (rs6000_cpu) \
+ { \
+ case PROCESSOR_RIOS1: \
+ return COSTS_N_INSNS (19); \
+ case PROCESSOR_RIOS2: \
+ return COSTS_N_INSNS (13); \
+ case PROCESSOR_RS64A: \
+ return (GET_MODE (XEXP (X, 1)) != DImode \
+ ? COSTS_N_INSNS (65) \
+ : COSTS_N_INSNS (67)); \
+ case PROCESSOR_MPCCORE: \
+ return COSTS_N_INSNS (6); \
+ case PROCESSOR_PPC403: \
+ return COSTS_N_INSNS (33); \
+ case PROCESSOR_PPC405: \
+ return COSTS_N_INSNS (35); \
+ case PROCESSOR_PPC601: \
+ return COSTS_N_INSNS (36); \
+ case PROCESSOR_PPC603: \
+ return COSTS_N_INSNS (37); \
+ case PROCESSOR_PPC604: \
+ case PROCESSOR_PPC604e: \
+ return COSTS_N_INSNS (20); \
+ case PROCESSOR_PPC620: \
+ case PROCESSOR_PPC630: \
+ return (GET_MODE (XEXP (X, 1)) != DImode \
+ ? COSTS_N_INSNS (21) \
+ : COSTS_N_INSNS (37)); \
+ case PROCESSOR_PPC750: \
+ case PROCESSOR_PPC7400: \
+ return COSTS_N_INSNS (19); \
+ case PROCESSOR_PPC7450: \
+ return COSTS_N_INSNS (23); \
+ } \
+ case FFS: \
+ return COSTS_N_INSNS (4); \
+ case MEM: \
+ /* MEM should be slightly more expensive than (plus (reg) (const)) */ \
+ return 5;
+
+/* Compute the cost of an address. This is meant to approximate the size
+ and/or execution delay of an insn using that address. If the cost is
+ approximated by the RTL complexity, including CONST_COSTS above, as
+ is usually the case for CISC machines, this macro should not be defined.
+ For aggressively RISCy machines, only one insn format is allowed, so
+ this macro should be a constant. The value of this macro only matters
+ for valid addresses.
+
+ For the RS/6000, everything is cost 0. */
+
+#define ADDRESS_COST(RTX) 0
+
+/* Adjust the length of an INSN. LENGTH is the currently-computed length and
+ should be adjusted to reflect any required changes. This macro is used when
+ there is some systematic length adjustment required that would be difficult
+ to express in the length attribute. */
+
+/* #define ADJUST_INSN_LENGTH(X,LENGTH) */
+
+/* Add any extra modes needed to represent the condition code.
+
+ For the RS/6000, we need separate modes when unsigned (logical) comparisons
+ are being done and we need a separate mode for floating-point. We also
+ use a mode for the case when we are comparing the results of two
+ comparisons, as then only the EQ bit is valid in the register. */
+
+#define EXTRA_CC_MODES \
+ CC(CCUNSmode, "CCUNS") \
+ CC(CCFPmode, "CCFP") \
+ CC(CCEQmode, "CCEQ")
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a
+ COMPARE, return the mode to be used for the comparison. For
+ floating-point, CCFPmode should be used. CCUNSmode should be used
+ for unsigned comparisons. CCEQmode should be used when we are
+ doing an inequality comparison on the result of a
+ comparison. CCmode should be used in all other cases. */
+
+#define SELECT_CC_MODE(OP,X,Y) \
+ (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT ? CCFPmode \
+ : (OP) == GTU || (OP) == LTU || (OP) == GEU || (OP) == LEU ? CCUNSmode \
+ : (((OP) == EQ || (OP) == NE) && GET_RTX_CLASS (GET_CODE (X)) == '<' \
+ ? CCEQmode : CCmode))
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *rs6000_compare_op0, *rs6000_compare_op1;
+extern int rs6000_compare_fp_p;
+
+/* Control the assembler format that we output. */
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+#define ASM_COMMENT_START " #"
+
+/* Implicit library calls should use memcpy, not bcopy, etc. */
+
+#define TARGET_MEM_FUNCTIONS
+
+/* Flag to say the TOC is initialized */
+extern int toc_initialized;
+
+/* Macro to output a special constant pool entry. Go to WIN if we output
+ it. Otherwise, it is written the usual way.
+
+ On the RS/6000, toc entries are handled this way. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY(FILE, X, MODE, ALIGN, LABELNO, WIN) \
+{ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (X, MODE)) \
+ { \
+ output_toc (FILE, X, LABELNO, MODE); \
+ goto WIN; \
+ } \
+}
+
+#ifdef HAVE_GAS_WEAK
+#define RS6000_WEAK 1
+#else
+#define RS6000_WEAK 0
+#endif
+
+/* This implements the `alias' attribute. */
+#define ASM_OUTPUT_DEF_FROM_DECLS(FILE,decl,target) \
+do { \
+ const char * alias = XSTR (XEXP (DECL_RTL (decl), 0), 0); \
+ char * name = IDENTIFIER_POINTER (target); \
+ if (TREE_CODE (decl) == FUNCTION_DECL \
+ && DEFAULT_ABI == ABI_AIX) \
+ { \
+ if (TREE_PUBLIC (decl)) \
+ { \
+ if (RS6000_WEAK && DECL_WEAK (decl)) \
+ { \
+ fputs ("\t.weak .", FILE); \
+ assemble_name (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ else \
+ { \
+ fputs ("\t.globl .", FILE); \
+ assemble_name (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else \
+ { \
+ fputs ("\t.lglobl .", FILE); \
+ assemble_name (FILE, alias); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.set .", FILE); \
+ assemble_name (FILE, alias); \
+ fputs (",.", FILE); \
+ assemble_name (FILE, name); \
+ fputc ('\n', FILE); \
+ } \
+ ASM_OUTPUT_DEF (FILE, alias, name); \
+} while (0)
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+extern char rs6000_reg_names[][8]; /* register names (0 vs. %r0). */
+
+#define REGISTER_NAMES \
+{ \
+ &rs6000_reg_names[ 0][0], /* r0 */ \
+ &rs6000_reg_names[ 1][0], /* r1 */ \
+ &rs6000_reg_names[ 2][0], /* r2 */ \
+ &rs6000_reg_names[ 3][0], /* r3 */ \
+ &rs6000_reg_names[ 4][0], /* r4 */ \
+ &rs6000_reg_names[ 5][0], /* r5 */ \
+ &rs6000_reg_names[ 6][0], /* r6 */ \
+ &rs6000_reg_names[ 7][0], /* r7 */ \
+ &rs6000_reg_names[ 8][0], /* r8 */ \
+ &rs6000_reg_names[ 9][0], /* r9 */ \
+ &rs6000_reg_names[10][0], /* r10 */ \
+ &rs6000_reg_names[11][0], /* r11 */ \
+ &rs6000_reg_names[12][0], /* r12 */ \
+ &rs6000_reg_names[13][0], /* r13 */ \
+ &rs6000_reg_names[14][0], /* r14 */ \
+ &rs6000_reg_names[15][0], /* r15 */ \
+ &rs6000_reg_names[16][0], /* r16 */ \
+ &rs6000_reg_names[17][0], /* r17 */ \
+ &rs6000_reg_names[18][0], /* r18 */ \
+ &rs6000_reg_names[19][0], /* r19 */ \
+ &rs6000_reg_names[20][0], /* r20 */ \
+ &rs6000_reg_names[21][0], /* r21 */ \
+ &rs6000_reg_names[22][0], /* r22 */ \
+ &rs6000_reg_names[23][0], /* r23 */ \
+ &rs6000_reg_names[24][0], /* r24 */ \
+ &rs6000_reg_names[25][0], /* r25 */ \
+ &rs6000_reg_names[26][0], /* r26 */ \
+ &rs6000_reg_names[27][0], /* r27 */ \
+ &rs6000_reg_names[28][0], /* r28 */ \
+ &rs6000_reg_names[29][0], /* r29 */ \
+ &rs6000_reg_names[30][0], /* r30 */ \
+ &rs6000_reg_names[31][0], /* r31 */ \
+ \
+ &rs6000_reg_names[32][0], /* fr0 */ \
+ &rs6000_reg_names[33][0], /* fr1 */ \
+ &rs6000_reg_names[34][0], /* fr2 */ \
+ &rs6000_reg_names[35][0], /* fr3 */ \
+ &rs6000_reg_names[36][0], /* fr4 */ \
+ &rs6000_reg_names[37][0], /* fr5 */ \
+ &rs6000_reg_names[38][0], /* fr6 */ \
+ &rs6000_reg_names[39][0], /* fr7 */ \
+ &rs6000_reg_names[40][0], /* fr8 */ \
+ &rs6000_reg_names[41][0], /* fr9 */ \
+ &rs6000_reg_names[42][0], /* fr10 */ \
+ &rs6000_reg_names[43][0], /* fr11 */ \
+ &rs6000_reg_names[44][0], /* fr12 */ \
+ &rs6000_reg_names[45][0], /* fr13 */ \
+ &rs6000_reg_names[46][0], /* fr14 */ \
+ &rs6000_reg_names[47][0], /* fr15 */ \
+ &rs6000_reg_names[48][0], /* fr16 */ \
+ &rs6000_reg_names[49][0], /* fr17 */ \
+ &rs6000_reg_names[50][0], /* fr18 */ \
+ &rs6000_reg_names[51][0], /* fr19 */ \
+ &rs6000_reg_names[52][0], /* fr20 */ \
+ &rs6000_reg_names[53][0], /* fr21 */ \
+ &rs6000_reg_names[54][0], /* fr22 */ \
+ &rs6000_reg_names[55][0], /* fr23 */ \
+ &rs6000_reg_names[56][0], /* fr24 */ \
+ &rs6000_reg_names[57][0], /* fr25 */ \
+ &rs6000_reg_names[58][0], /* fr26 */ \
+ &rs6000_reg_names[59][0], /* fr27 */ \
+ &rs6000_reg_names[60][0], /* fr28 */ \
+ &rs6000_reg_names[61][0], /* fr29 */ \
+ &rs6000_reg_names[62][0], /* fr30 */ \
+ &rs6000_reg_names[63][0], /* fr31 */ \
+ \
+ &rs6000_reg_names[64][0], /* mq */ \
+ &rs6000_reg_names[65][0], /* lr */ \
+ &rs6000_reg_names[66][0], /* ctr */ \
+ &rs6000_reg_names[67][0], /* ap */ \
+ \
+ &rs6000_reg_names[68][0], /* cr0 */ \
+ &rs6000_reg_names[69][0], /* cr1 */ \
+ &rs6000_reg_names[70][0], /* cr2 */ \
+ &rs6000_reg_names[71][0], /* cr3 */ \
+ &rs6000_reg_names[72][0], /* cr4 */ \
+ &rs6000_reg_names[73][0], /* cr5 */ \
+ &rs6000_reg_names[74][0], /* cr6 */ \
+ &rs6000_reg_names[75][0], /* cr7 */ \
+ \
+ &rs6000_reg_names[76][0], /* xer */ \
+ \
+ &rs6000_reg_names[77][0], /* v0 */ \
+ &rs6000_reg_names[78][0], /* v1 */ \
+ &rs6000_reg_names[79][0], /* v2 */ \
+ &rs6000_reg_names[80][0], /* v3 */ \
+ &rs6000_reg_names[81][0], /* v4 */ \
+ &rs6000_reg_names[82][0], /* v5 */ \
+ &rs6000_reg_names[83][0], /* v6 */ \
+ &rs6000_reg_names[84][0], /* v7 */ \
+ &rs6000_reg_names[85][0], /* v8 */ \
+ &rs6000_reg_names[86][0], /* v9 */ \
+ &rs6000_reg_names[87][0], /* v10 */ \
+ &rs6000_reg_names[88][0], /* v11 */ \
+ &rs6000_reg_names[89][0], /* v12 */ \
+ &rs6000_reg_names[90][0], /* v13 */ \
+ &rs6000_reg_names[91][0], /* v14 */ \
+ &rs6000_reg_names[92][0], /* v15 */ \
+ &rs6000_reg_names[93][0], /* v16 */ \
+ &rs6000_reg_names[94][0], /* v17 */ \
+ &rs6000_reg_names[95][0], /* v18 */ \
+ &rs6000_reg_names[96][0], /* v19 */ \
+ &rs6000_reg_names[97][0], /* v20 */ \
+ &rs6000_reg_names[98][0], /* v21 */ \
+ &rs6000_reg_names[99][0], /* v22 */ \
+ &rs6000_reg_names[100][0], /* v23 */ \
+ &rs6000_reg_names[101][0], /* v24 */ \
+ &rs6000_reg_names[102][0], /* v25 */ \
+ &rs6000_reg_names[103][0], /* v26 */ \
+ &rs6000_reg_names[104][0], /* v27 */ \
+ &rs6000_reg_names[105][0], /* v28 */ \
+ &rs6000_reg_names[106][0], /* v29 */ \
+ &rs6000_reg_names[107][0], /* v30 */ \
+ &rs6000_reg_names[108][0], /* v31 */ \
+ &rs6000_reg_names[109][0], /* vrsave */ \
+}
+
+/* print-rtl can't handle the above REGISTER_NAMES, so define the
+ following for it. Switch to use the alternate names since
+ they are more mnemonic. */
+
+#define DEBUG_REGISTER_NAMES \
+{ \
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", \
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", \
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", \
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31", \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
+ "mq", "lr", "ctr", "ap", \
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7", \
+ "xer", \
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", \
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", \
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", \
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", \
+ "vrsave" \
+}
+
+/* Table of additional register names to use in user input. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+ {{"r0", 0}, {"r1", 1}, {"r2", 2}, {"r3", 3}, \
+ {"r4", 4}, {"r5", 5}, {"r6", 6}, {"r7", 7}, \
+ {"r8", 8}, {"r9", 9}, {"r10", 10}, {"r11", 11}, \
+ {"r12", 12}, {"r13", 13}, {"r14", 14}, {"r15", 15}, \
+ {"r16", 16}, {"r17", 17}, {"r18", 18}, {"r19", 19}, \
+ {"r20", 20}, {"r21", 21}, {"r22", 22}, {"r23", 23}, \
+ {"r24", 24}, {"r25", 25}, {"r26", 26}, {"r27", 27}, \
+ {"r28", 28}, {"r29", 29}, {"r30", 30}, {"r31", 31}, \
+ {"fr0", 32}, {"fr1", 33}, {"fr2", 34}, {"fr3", 35}, \
+ {"fr4", 36}, {"fr5", 37}, {"fr6", 38}, {"fr7", 39}, \
+ {"fr8", 40}, {"fr9", 41}, {"fr10", 42}, {"fr11", 43}, \
+ {"fr12", 44}, {"fr13", 45}, {"fr14", 46}, {"fr15", 47}, \
+ {"fr16", 48}, {"fr17", 49}, {"fr18", 50}, {"fr19", 51}, \
+ {"fr20", 52}, {"fr21", 53}, {"fr22", 54}, {"fr23", 55}, \
+ {"fr24", 56}, {"fr25", 57}, {"fr26", 58}, {"fr27", 59}, \
+ {"fr28", 60}, {"fr29", 61}, {"fr30", 62}, {"fr31", 63}, \
+ {"v0", 77}, {"v1", 78}, {"v2", 79}, {"v3", 80}, \
+ {"v4", 81}, {"v5", 82}, {"v6", 83}, {"v7", 84}, \
+ {"v8", 85}, {"v9", 86}, {"v10", 87}, {"v11", 88}, \
+ {"v12", 89}, {"v13", 90}, {"v14", 91}, {"v15", 92}, \
+ {"v16", 93}, {"v17", 94}, {"v18", 95}, {"v19", 96}, \
+ {"v20", 97}, {"v21", 98}, {"v22", 99}, {"v23", 100}, \
+ {"v24", 101},{"v25", 102},{"v26", 103},{"v27", 104}, \
+ {"v28", 105},{"v29", 106},{"v30", 107},{"v31", 108}, \
+ {"vrsave", 109}, \
+ /* no additional names for: mq, lr, ctr, ap */ \
+ {"cr0", 68}, {"cr1", 69}, {"cr2", 70}, {"cr3", 71}, \
+ {"cr4", 72}, {"cr5", 73}, {"cr6", 74}, {"cr7", 75}, \
+ {"cc", 68}, {"sp", 1}, {"toc", 2} }
+
+/* Text to write out after a CALL that may be replaced by glue code by
+ the loader. This depends on the AIX version. */
+#define RS6000_CALL_GLUE "cror 31,31,31"
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do { char buf[100]; \
+ fputs ("\t.long ", FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", VALUE); \
+ assemble_name (FILE, buf); \
+ putc ('-', FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", REL); \
+ assemble_name (FILE, buf); \
+ putc ('\n', FILE); \
+ } while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (LOG))
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+/* Pick up the return address upon entry to a procedure. Used for
+ dwarf2 unwind information. This also enables the table driven
+ mechanism. */
+
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM)
+#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LINK_REGISTER_REGNUM)
+
+/* Describe how we implement __builtin_eh_return. */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 3 : INVALID_REGNUM)
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 10)
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Define which CODE values are valid. */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '.')
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) print_operand_address (FILE, ADDR)
+
+/* Define the codes that are matched by predicates in rs6000.c. */
+
+#define PREDICATE_CODES \
+ {"short_cint_operand", {CONST_INT}}, \
+ {"u_short_cint_operand", {CONST_INT}}, \
+ {"non_short_cint_operand", {CONST_INT}}, \
+ {"exact_log2_cint_operand", {CONST_INT}}, \
+ {"gpc_reg_operand", {SUBREG, REG}}, \
+ {"cc_reg_operand", {SUBREG, REG}}, \
+ {"cc_reg_not_cr0_operand", {SUBREG, REG}}, \
+ {"reg_or_short_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_neg_short_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_u_short_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_arith_cint_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_add_cint64_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_sub_cint64_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_logical_cint_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
+ {"got_operand", {SYMBOL_REF, CONST, LABEL_REF}}, \
+ {"got_no_const_operand", {SYMBOL_REF, LABEL_REF}}, \
+ {"easy_fp_constant", {CONST_DOUBLE}}, \
+ {"zero_fp_constant", {CONST_DOUBLE}}, \
+ {"reg_or_mem_operand", {SUBREG, MEM, REG}}, \
+ {"lwa_operand", {SUBREG, MEM, REG}}, \
+ {"volatile_mem_operand", {MEM}}, \
+ {"offsettable_mem_operand", {MEM}}, \
+ {"mem_or_easy_const_operand", {SUBREG, MEM, CONST_DOUBLE}}, \
+ {"add_operand", {SUBREG, REG, CONST_INT}}, \
+ {"non_add_cint_operand", {CONST_INT}}, \
+ {"and_operand", {SUBREG, REG, CONST_INT}}, \
+ {"and64_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
+ {"logical_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
+ {"non_logical_cint_operand", {CONST_INT, CONST_DOUBLE}}, \
+ {"mask_operand", {CONST_INT}}, \
+ {"mask64_operand", {CONST_INT, CONST_DOUBLE}}, \
+ {"count_register_operand", {REG}}, \
+ {"xer_operand", {REG}}, \
+ {"call_operand", {SYMBOL_REF, REG}}, \
+ {"current_file_function_operand", {SYMBOL_REF}}, \
+ {"input_operand", {SUBREG, MEM, REG, CONST_INT, \
+ CONST_DOUBLE, SYMBOL_REF}}, \
+ {"load_multiple_operation", {PARALLEL}}, \
+ {"store_multiple_operation", {PARALLEL}}, \
+ {"vrsave_operation", {PARALLEL}}, \
+ {"branch_comparison_operator", {EQ, NE, LE, LT, GE, \
+ GT, LEU, LTU, GEU, GTU, \
+ UNORDERED, ORDERED, \
+ UNGE, UNLE }}, \
+ {"branch_positive_comparison_operator", {EQ, LT, GT, LTU, GTU, \
+ UNORDERED }}, \
+ {"scc_comparison_operator", {EQ, NE, LE, LT, GE, \
+ GT, LEU, LTU, GEU, GTU, \
+ UNORDERED, ORDERED, \
+ UNGE, UNLE }}, \
+ {"trap_comparison_operator", {EQ, NE, LE, LT, GE, \
+ GT, LEU, LTU, GEU, GTU}}, \
+ {"boolean_operator", {AND, IOR, XOR}}, \
+ {"boolean_or_operator", {IOR, XOR}}, \
+ {"min_max_operator", {SMIN, SMAX, UMIN, UMAX}},
+
+/* uncomment for disabling the corresponding default options */
+/* #define MACHINE_no_sched_interblock */
+/* #define MACHINE_no_sched_speculative */
+/* #define MACHINE_no_sched_speculative_load */
+
+/* General flags. */
+extern int flag_pic;
+extern int optimize;
+extern int flag_expensive_optimizations;
+extern int frame_pointer_needed;
+
+enum rs6000_builtins
+{
+ /* AltiVec builtins. */
+ ALTIVEC_BUILTIN_ST_INTERNAL_4si,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4si,
+ ALTIVEC_BUILTIN_ST_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_8hi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_LD_INTERNAL_16qi,
+ ALTIVEC_BUILTIN_ST_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_LD_INTERNAL_4sf,
+ ALTIVEC_BUILTIN_VADDUBM,
+ ALTIVEC_BUILTIN_VADDUHM,
+ ALTIVEC_BUILTIN_VADDUWM,
+ ALTIVEC_BUILTIN_VADDFP,
+ ALTIVEC_BUILTIN_VADDCUW,
+ ALTIVEC_BUILTIN_VADDUBS,
+ ALTIVEC_BUILTIN_VADDSBS,
+ ALTIVEC_BUILTIN_VADDUHS,
+ ALTIVEC_BUILTIN_VADDSHS,
+ ALTIVEC_BUILTIN_VADDUWS,
+ ALTIVEC_BUILTIN_VADDSWS,
+ ALTIVEC_BUILTIN_VAND,
+ ALTIVEC_BUILTIN_VANDC,
+ ALTIVEC_BUILTIN_VAVGUB,
+ ALTIVEC_BUILTIN_VAVGSB,
+ ALTIVEC_BUILTIN_VAVGUH,
+ ALTIVEC_BUILTIN_VAVGSH,
+ ALTIVEC_BUILTIN_VAVGUW,
+ ALTIVEC_BUILTIN_VAVGSW,
+ ALTIVEC_BUILTIN_VCFUX,
+ ALTIVEC_BUILTIN_VCFSX,
+ ALTIVEC_BUILTIN_VCTSXS,
+ ALTIVEC_BUILTIN_VCTUXS,
+ ALTIVEC_BUILTIN_VCMPBFP,
+ ALTIVEC_BUILTIN_VCMPEQUB,
+ ALTIVEC_BUILTIN_VCMPEQUH,
+ ALTIVEC_BUILTIN_VCMPEQUW,
+ ALTIVEC_BUILTIN_VCMPEQFP,
+ ALTIVEC_BUILTIN_VCMPGEFP,
+ ALTIVEC_BUILTIN_VCMPGTUB,
+ ALTIVEC_BUILTIN_VCMPGTSB,
+ ALTIVEC_BUILTIN_VCMPGTUH,
+ ALTIVEC_BUILTIN_VCMPGTSH,
+ ALTIVEC_BUILTIN_VCMPGTUW,
+ ALTIVEC_BUILTIN_VCMPGTSW,
+ ALTIVEC_BUILTIN_VCMPGTFP,
+ ALTIVEC_BUILTIN_VEXPTEFP,
+ ALTIVEC_BUILTIN_VLOGEFP,
+ ALTIVEC_BUILTIN_VMADDFP,
+ ALTIVEC_BUILTIN_VMAXUB,
+ ALTIVEC_BUILTIN_VMAXSB,
+ ALTIVEC_BUILTIN_VMAXUH,
+ ALTIVEC_BUILTIN_VMAXSH,
+ ALTIVEC_BUILTIN_VMAXUW,
+ ALTIVEC_BUILTIN_VMAXSW,
+ ALTIVEC_BUILTIN_VMAXFP,
+ ALTIVEC_BUILTIN_VMHADDSHS,
+ ALTIVEC_BUILTIN_VMHRADDSHS,
+ ALTIVEC_BUILTIN_VMLADDUHM,
+ ALTIVEC_BUILTIN_VMRGHB,
+ ALTIVEC_BUILTIN_VMRGHH,
+ ALTIVEC_BUILTIN_VMRGHW,
+ ALTIVEC_BUILTIN_VMRGLB,
+ ALTIVEC_BUILTIN_VMRGLH,
+ ALTIVEC_BUILTIN_VMRGLW,
+ ALTIVEC_BUILTIN_VMSUMUBM,
+ ALTIVEC_BUILTIN_VMSUMMBM,
+ ALTIVEC_BUILTIN_VMSUMUHM,
+ ALTIVEC_BUILTIN_VMSUMSHM,
+ ALTIVEC_BUILTIN_VMSUMUHS,
+ ALTIVEC_BUILTIN_VMSUMSHS,
+ ALTIVEC_BUILTIN_VMINUB,
+ ALTIVEC_BUILTIN_VMINSB,
+ ALTIVEC_BUILTIN_VMINUH,
+ ALTIVEC_BUILTIN_VMINSH,
+ ALTIVEC_BUILTIN_VMINUW,
+ ALTIVEC_BUILTIN_VMINSW,
+ ALTIVEC_BUILTIN_VMINFP,
+ ALTIVEC_BUILTIN_VMULEUB,
+ ALTIVEC_BUILTIN_VMULESB,
+ ALTIVEC_BUILTIN_VMULEUH,
+ ALTIVEC_BUILTIN_VMULESH,
+ ALTIVEC_BUILTIN_VMULOUB,
+ ALTIVEC_BUILTIN_VMULOSB,
+ ALTIVEC_BUILTIN_VMULOUH,
+ ALTIVEC_BUILTIN_VMULOSH,
+ ALTIVEC_BUILTIN_VNMSUBFP,
+ ALTIVEC_BUILTIN_VNOR,
+ ALTIVEC_BUILTIN_VOR,
+ ALTIVEC_BUILTIN_VSEL_4SI,
+ ALTIVEC_BUILTIN_VSEL_4SF,
+ ALTIVEC_BUILTIN_VSEL_8HI,
+ ALTIVEC_BUILTIN_VSEL_16QI,
+ ALTIVEC_BUILTIN_VPERM_4SI,
+ ALTIVEC_BUILTIN_VPERM_4SF,
+ ALTIVEC_BUILTIN_VPERM_8HI,
+ ALTIVEC_BUILTIN_VPERM_16QI,
+ ALTIVEC_BUILTIN_VPKUHUM,
+ ALTIVEC_BUILTIN_VPKUWUM,
+ ALTIVEC_BUILTIN_VPKPX,
+ ALTIVEC_BUILTIN_VPKUHSS,
+ ALTIVEC_BUILTIN_VPKSHSS,
+ ALTIVEC_BUILTIN_VPKUWSS,
+ ALTIVEC_BUILTIN_VPKSWSS,
+ ALTIVEC_BUILTIN_VPKUHUS,
+ ALTIVEC_BUILTIN_VPKSHUS,
+ ALTIVEC_BUILTIN_VPKUWUS,
+ ALTIVEC_BUILTIN_VPKSWUS,
+ ALTIVEC_BUILTIN_VREFP,
+ ALTIVEC_BUILTIN_VRFIM,
+ ALTIVEC_BUILTIN_VRFIN,
+ ALTIVEC_BUILTIN_VRFIP,
+ ALTIVEC_BUILTIN_VRFIZ,
+ ALTIVEC_BUILTIN_VRLB,
+ ALTIVEC_BUILTIN_VRLH,
+ ALTIVEC_BUILTIN_VRLW,
+ ALTIVEC_BUILTIN_VRSQRTEFP,
+ ALTIVEC_BUILTIN_VSLB,
+ ALTIVEC_BUILTIN_VSLH,
+ ALTIVEC_BUILTIN_VSLW,
+ ALTIVEC_BUILTIN_VSL,
+ ALTIVEC_BUILTIN_VSLO,
+ ALTIVEC_BUILTIN_VSPLTB,
+ ALTIVEC_BUILTIN_VSPLTH,
+ ALTIVEC_BUILTIN_VSPLTW,
+ ALTIVEC_BUILTIN_VSPLTISB,
+ ALTIVEC_BUILTIN_VSPLTISH,
+ ALTIVEC_BUILTIN_VSPLTISW,
+ ALTIVEC_BUILTIN_VSRB,
+ ALTIVEC_BUILTIN_VSRH,
+ ALTIVEC_BUILTIN_VSRW,
+ ALTIVEC_BUILTIN_VSRAB,
+ ALTIVEC_BUILTIN_VSRAH,
+ ALTIVEC_BUILTIN_VSRAW,
+ ALTIVEC_BUILTIN_VSR,
+ ALTIVEC_BUILTIN_VSRO,
+ ALTIVEC_BUILTIN_VSUBUBM,
+ ALTIVEC_BUILTIN_VSUBUHM,
+ ALTIVEC_BUILTIN_VSUBUWM,
+ ALTIVEC_BUILTIN_VSUBFP,
+ ALTIVEC_BUILTIN_VSUBCUW,
+ ALTIVEC_BUILTIN_VSUBUBS,
+ ALTIVEC_BUILTIN_VSUBSBS,
+ ALTIVEC_BUILTIN_VSUBUHS,
+ ALTIVEC_BUILTIN_VSUBSHS,
+ ALTIVEC_BUILTIN_VSUBUWS,
+ ALTIVEC_BUILTIN_VSUBSWS,
+ ALTIVEC_BUILTIN_VSUM4UBS,
+ ALTIVEC_BUILTIN_VSUM4SBS,
+ ALTIVEC_BUILTIN_VSUM4SHS,
+ ALTIVEC_BUILTIN_VSUM2SWS,
+ ALTIVEC_BUILTIN_VSUMSWS,
+ ALTIVEC_BUILTIN_VXOR,
+ ALTIVEC_BUILTIN_VSLDOI_16QI,
+ ALTIVEC_BUILTIN_VSLDOI_8HI,
+ ALTIVEC_BUILTIN_VSLDOI_4SI,
+ ALTIVEC_BUILTIN_VSLDOI_4SF,
+ ALTIVEC_BUILTIN_VUPKHSB,
+ ALTIVEC_BUILTIN_VUPKHPX,
+ ALTIVEC_BUILTIN_VUPKHSH,
+ ALTIVEC_BUILTIN_VUPKLSB,
+ ALTIVEC_BUILTIN_VUPKLPX,
+ ALTIVEC_BUILTIN_VUPKLSH,
+ ALTIVEC_BUILTIN_VCMPBFP_P,
+ ALTIVEC_BUILTIN_VCMPEQFP_P,
+ ALTIVEC_BUILTIN_VCMPEQUB_P,
+ ALTIVEC_BUILTIN_VCMPEQUH_P,
+ ALTIVEC_BUILTIN_VCMPEQUW_P,
+ ALTIVEC_BUILTIN_VCMPGEFP_P,
+ ALTIVEC_BUILTIN_VCMPGTFP_P,
+ ALTIVEC_BUILTIN_VCMPGTSB_P,
+ ALTIVEC_BUILTIN_VCMPGTSH_P,
+ ALTIVEC_BUILTIN_VCMPGTSW_P,
+ ALTIVEC_BUILTIN_VCMPGTUB_P,
+ ALTIVEC_BUILTIN_VCMPGTUH_P,
+ ALTIVEC_BUILTIN_VCMPGTUW_P,
+ ALTIVEC_BUILTIN_MTVSCR,
+ ALTIVEC_BUILTIN_MFVSCR,
+ ALTIVEC_BUILTIN_DSSALL,
+ ALTIVEC_BUILTIN_DSS,
+ ALTIVEC_BUILTIN_LVSL,
+ ALTIVEC_BUILTIN_LVSR,
+ ALTIVEC_BUILTIN_DSTT,
+ ALTIVEC_BUILTIN_DSTST,
+ ALTIVEC_BUILTIN_DSTSTT,
+ ALTIVEC_BUILTIN_DST,
+ ALTIVEC_BUILTIN_LVEBX,
+ ALTIVEC_BUILTIN_LVEHX,
+ ALTIVEC_BUILTIN_LVEWX,
+ ALTIVEC_BUILTIN_LVXL,
+ ALTIVEC_BUILTIN_LVX,
+ ALTIVEC_BUILTIN_STVX,
+ ALTIVEC_BUILTIN_STVEBX,
+ ALTIVEC_BUILTIN_STVEHX,
+ ALTIVEC_BUILTIN_STVEWX,
+ ALTIVEC_BUILTIN_STVXL
+};
diff --git a/contrib/gcc/config/rs6000/rs6000.md b/contrib/gcc/config/rs6000/rs6000.md
new file mode 100644
index 0000000..8fb45ff
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rs6000.md
@@ -0,0 +1,15598 @@
+;; Machine description for IBM RISC System 6000 (POWER) for GNU C compiler
+;; Copyright (C) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
+;; 1999, 2000, 2001 Free Software Foundation, Inc.
+;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; `unspec' values used in rs6000.md:
+;; Number Use
+;; 0 frsp for POWER machines
+;; 0/v blockage
+;; 5 used to tie the stack contents and the stack pointer
+;; 6 address of a word pointing to the TOC
+;; 7 address of the TOC (more-or-less)
+;; 8 movsi_got
+;; 9/v eh_reg_restore
+;; 10 fctiwz
+;; 19 movesi_from_cr
+;; 20 movesi_to_cr
+
+;; Define an insn type attribute. This is used in function unit delay
+;; computations.
+(define_attr "type" "integer,load,store,fpload,fpstore,vecload,vecstore,imul,imul2,imul3,lmul,idiv,ldiv,branch,compare,cr_logical,delayed_compare,fpcompare,mtjmpr,fp,dmul,sdiv,ddiv,ssqrt,dsqrt,jmpreg,vecsimple,veccomplex,veccmp,vecperm,vecfloat,altivec"
+ (const_string "integer"))
+
+;; Length (in bytes).
+; '(pc)' in the following doesn't include the instruction itself; it is
+; calculated as if the instruction had zero size.
+(define_attr "length" ""
+ (if_then_else (eq_attr "type" "branch")
+ (if_then_else (and (ge (minus (match_dup 0) (pc))
+ (const_int -32768))
+ (lt (minus (match_dup 0) (pc))
+ (const_int 32764)))
+ (const_int 4)
+ (const_int 8))
+ (const_int 4)))
+
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in rs6000.h.
+
+(define_attr "cpu" "rios1,rios2,rs64a,mpccore,ppc403,ppc405,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400,ppc7450"
+ (const (symbol_ref "rs6000_cpu_attr")))
+
+; (define_function_unit NAME MULTIPLICITY SIMULTANEITY
+; TEST READY-DELAY ISSUE-DELAY [CONFLICT-LIST])
+
+; Load/Store Unit -- pure PowerPC only
+; (POWER and 601 use Integer Unit)
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "rs64a,mpccore,ppc603,ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400"))
+ 2 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "load,vecload")
+ (eq_attr "cpu" "ppc7450"))
+ 3 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "store,fpstore")
+ (eq_attr "cpu" "rs64a,mpccore,ppc603,ppc604,ppc604e,ppc620,ppc630"))
+ 1 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "store,fpstore")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 2 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "store,vecstore")
+ (eq_attr "cpu" "ppc7450"))
+ 3 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "ppc7450"))
+ 3 3)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "mpccore,ppc603,ppc750,ppc7400"))
+ 2 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "ppc7450"))
+ 4 1)
+
+(define_function_unit "lsu" 1 0
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "rs64a,ppc604,ppc604e,ppc620,ppc630"))
+ 3 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "load")
+ (eq_attr "cpu" "rios1,ppc403,ppc405,ppc601"))
+ 2 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "store,fpstore")
+ (eq_attr "cpu" "rios1,ppc403,ppc405,ppc601"))
+ 1 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fpstore")
+ (eq_attr "cpu" "rios1,ppc601"))
+ 0 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "rios1"))
+ 2 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "fpload")
+ (eq_attr "cpu" "ppc601"))
+ 3 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "load,fpload")
+ (eq_attr "cpu" "rios2"))
+ 2 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "store,fpstore")
+ (eq_attr "cpu" "rios2"))
+ 1 1)
+
+; Integer Unit (RIOS1, PPC601, PPC603, RS64a)
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "rios1,rs64a,mpccore,ppc403,ppc405,ppc601,ppc603"))
+ 1 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "mpccore,ppc403,ppc405,ppc601"))
+ 1 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "ppc403"))
+ 4 4)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "ppc405"))
+ 4 3)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc405"))
+ 3 2)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "rios1"))
+ 5 5)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rios1"))
+ 4 4)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "rios1"))
+ 3 3)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "ppc601,ppc603"))
+ 5 5)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "rs64a"))
+ 20 20)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "rs64a"))
+ 12 12)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "rs64a"))
+ 8 8)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "rs64a"))
+ 34 34)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios1"))
+ 19 19)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rs64a"))
+ 66 66)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "rs64a"))
+ 66 66)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc403"))
+ 33 33)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc405"))
+ 35 35)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc601"))
+ 36 36)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc603"))
+ 37 36)
+
+; RIOS2 has two integer units: a primary one which can perform all
+; operations and a secondary one which is fed in lock step with the first
+; and can perform "simple" integer operations.
+; To catch this we define a 'dummy' imuldiv-unit that is also needed
+; for the complex insns.
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "rios2"))
+ 1 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "rios2"))
+ 2 2)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios2"))
+ 13 13)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "rios2"))
+ 2 2)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "rios2"))
+ 13 13)
+
+; MPCCORE has separate IMUL/IDIV unit for multicycle instructions
+; Divide latency varies greatly from 2-11, use 6 as average
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "mpccore"))
+ 2 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "mpccore"))
+ 6 6)
+
+; PPC604{,e} has two units that perform integer operations
+; and one unit for divide/multiply operations (and move
+; from/to spr).
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630"))
+ 1 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "ppc604"))
+ 4 2)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul,imul2,imul3")
+ (eq_attr "cpu" "ppc604e"))
+ 2 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ 5 3)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ 4 3)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ 3 3)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "lmul")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ 7 5)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc604,ppc604e"))
+ 20 19)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc620"))
+ 37 36)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc630"))
+ 21 20)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "ldiv")
+ (eq_attr "cpu" "ppc620,ppc630"))
+ 37 36)
+
+; PPC7450 has 3 integer units (for most integer insns) and one mul/div
+; unit, which also does CR-logical insns and move to/from SPR.
+; It also has 4 vector units, one for each type of vector instruction.
+; However, we can only dispatch 2 instructions per cycle.
+; We model this as saying that dispatching two of the same type of instruction
+; in a row incurs a single cycle delay.
+(define_function_unit "iu3" 3 0
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "ppc7450"))
+ 1 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "ppc7450"))
+ 4 2)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul2,imul3")
+ (eq_attr "cpu" "ppc7450"))
+ 3 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc7450"))
+ 23 23)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "ppc7450"))
+ 1 1)
+; Each vector reservation below comes in pairs: the conflict-list form
+; [(eq_attr "type" "X")] charges an extra issue cycle only against a
+; following insn of the SAME vector type, implementing the 2-dispatch
+; limit described above; the "!X" form covers everything else.
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc7450"))
+ 1 2 [(eq_attr "type" "vecsimple")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecsimple")
+ (eq_attr "cpu" "ppc7450"))
+ 1 1 [(eq_attr "type" "!vecsimple")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7450"))
+ 4 2 [(eq_attr "type" "veccomplex")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "veccomplex")
+ (eq_attr "cpu" "ppc7450"))
+ 4 1 [(eq_attr "type" "!veccomplex")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc7450"))
+ 2 2 [(eq_attr "type" "veccmp")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "veccmp")
+ (eq_attr "cpu" "ppc7450"))
+ 2 1 [(eq_attr "type" "!veccmp")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7450"))
+ 4 2 [(eq_attr "type" "vecfloat")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecfloat")
+ (eq_attr "cpu" "ppc7450"))
+ 4 1 [(eq_attr "type" "!vecfloat")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7450"))
+ 2 2 [(eq_attr "type" "vecperm")])
+(define_function_unit "vec_alu2" 2 0
+ (and (eq_attr "type" "vecperm")
+ (eq_attr "cpu" "ppc7450"))
+ 2 1 [(eq_attr "type" "!vecperm")])
+
+; PPC750 has two integer units: a primary one which can perform all
+; operations and a secondary one which is fed in lock step with the first
+; and can perform "simple" integer operations.
+; To catch this we define a 'dummy' imuldiv-unit that is also needed
+; for the complex insns.
+; (Multiply/divide therefore reserve BOTH "iu2" and "imuldiv" below, so a
+; complex insn occupies an integer slot and blocks other complex insns.)
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "integer")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 1 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 4 4)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 3 2)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 2 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 19 19)
+
+; Duplicate reservations on the single dummy "imuldiv" unit serialize the
+; complex insns against each other (see comment above).
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 4 4)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul2")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 3 2)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "imul3")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 2 1)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "idiv")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 19 19)
+
+; CR-logical operations are execute-serialized, that is they don't
+; start (and block the function unit) until all preceding operations
+; have finished. They don't block dispatch of other insns, though.
+; I've imitated this by giving them longer latency.
+(define_function_unit "sru" 1 0
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "ppc603,ppc750,ppc7400"))
+ 3 2)
+
+; compare is done on integer unit, but feeds insns which
+; execute on the branch unit.
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "compare")
+ (eq_attr "cpu" "rios1"))
+ 4 1)
+
+; delayed_compare gets one extra cycle of latency than plain compare.
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "delayed_compare")
+ (eq_attr "cpu" "rios1"))
+ 5 1)
+
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "rs64a,mpccore,ppc403,ppc405,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630"))
+ 3 1)
+
+; some extra cycles added by TARGET_SCHED_ADJUST_COST between compare
+; and a following branch, to reduce mispredicts
+(define_function_unit "iu3" 3 0
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "ppc7450"))
+ 1 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "rios2"))
+ 3 1)
+
+(define_function_unit "iu2" 2 0
+ (and (eq_attr "type" "compare,delayed_compare")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc630,ppc750,ppc7400"))
+ 1 1)
+
+; fp compare uses fp unit
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios1"))
+ 9 1)
+
+; rios1 and rios2 have different fpcompare delays
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rios2,ppc630"))
+ 5 1)
+
+; on ppc601 and ppc603, fpcompare takes also 2 cycles from
+; the integer unit
+; here we do not define delays, just occupy the unit. The dependencies
+; will be assigned by the fpcompare definition in the fpu.
+; (READY 0 means "contributes no result latency"; ISSUE 2 occupies the
+; integer unit for two cycles.)
+(define_function_unit "iu" 1 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc601,ppc603"))
+ 0 2)
+
+; fp compare uses fp unit
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "rs64a,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630"))
+ 5 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "ppc750,ppc7400,ppc7450"))
+ 3 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fpcompare")
+ (eq_attr "cpu" "mpccore"))
+ 1 1)
+
+; mtjmpr = move to a jump register (CTR/LR); latency before a dependent
+; branch can use the value.
+(define_function_unit "bpu" 1 0
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "rios1,rios2,rs64a"))
+ 5 1)
+
+(define_function_unit "bpu" 1 0
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "mpccore,ppc403,ppc405,ppc601,ppc603,ppc604,ppc604e,ppc620,ppc630"))
+ 4 1)
+
+; On 750/7400 the SRU handles these; on 7450 the mul/div unit does.
+(define_function_unit "sru" 1 0
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc750,ppc7400"))
+ 2 2)
+
+(define_function_unit "imuldiv" 1 0
+ (and (eq_attr "type" "mtjmpr")
+ (eq_attr "cpu" "ppc7450"))
+ 2 2)
+
+(define_function_unit "bpu" 1 0
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "rios1,rios2,ppc604"))
+ 4 1)
+
+; ppc604e and later have a dedicated condition-register unit.
+(define_function_unit "cru" 1 0
+ (and (eq_attr "type" "cr_logical")
+ (eq_attr "cpu" "ppc604e,ppc620,ppc630,rs64a"))
+ 1 1)
+
+; all jumps/branches are executing on the bpu, in 1 cycle, for all machines.
+(define_function_unit "bpu" 1 0
+ (eq_attr "type" "jmpreg")
+ 1 1)
+
+(define_function_unit "bpu" 1 0
+ (eq_attr "type" "branch")
+ 1 1)
+
+; Floating Point Unit
+; Per-CPU latencies for fp (single-precision / generic fp arithmetic),
+; dmul (double multiply), sdiv/ddiv (single/double divide) and
+; ssqrt/dsqrt (square root).  Divide and sqrt are unpipelined:
+; READY == ISSUE so the unit is blocked for the whole operation.
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "rios1"))
+ 2 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "rs64a,mpccore"))
+ 4 2)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc601"))
+ 4 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc603,ppc604,ppc604e,ppc620,ppc750,ppc7400"))
+ 3 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "fp,dmul")
+ (eq_attr "cpu" "ppc7450"))
+ 5 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "rs64a"))
+ 7 2)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "mpccore"))
+ 5 5)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc601"))
+ 5 2)
+
+; is this true?
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc603,ppc750"))
+ 4 2)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc604,ppc604e,ppc620,ppc7400"))
+ 3 1)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios1"))
+ 19 19)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "rs64a"))
+ 31 31)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc601,ppc750,ppc7400"))
+ 17 17)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc7450"))
+ 21 21)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "mpccore"))
+ 10 10)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc603,ppc604,ppc604e,ppc620"))
+ 18 18)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "mpccore"))
+ 17 17)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "rs64a,ppc601,ppc750,ppc604,ppc604e,ppc620,ppc7400"))
+ 31 31)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc7450"))
+ 35 35)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc603"))
+ 33 33)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc620"))
+ 31 31)
+
+(define_function_unit "fpu" 1 0
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc620"))
+ 31 31)
+
+; RIOS2 has two symmetric FPUs.
+; (ppc630 also uses the dual-FPU "fpu2" unit below.)
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "rios2"))
+ 2 1)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "fp")
+ (eq_attr "cpu" "ppc630"))
+ 3 1)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "rios2"))
+ 2 1)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "dmul")
+ (eq_attr "cpu" "ppc630"))
+ 3 1)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "sdiv,ddiv")
+ (eq_attr "cpu" "rios2"))
+ 17 17)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "sdiv")
+ (eq_attr "cpu" "ppc630"))
+ 17 17)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "ddiv")
+ (eq_attr "cpu" "ppc630"))
+ 21 21)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "ssqrt,dsqrt")
+ (eq_attr "cpu" "rios2"))
+ 26 26)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "ssqrt")
+ (eq_attr "cpu" "ppc630"))
+ 18 18)
+
+(define_function_unit "fpu2" 2 0
+ (and (eq_attr "type" "dsqrt")
+ (eq_attr "cpu" "ppc630"))
+ 26 26)
+
+;; Start with fixed-point load and store insns. Here we put only the more
+;; complex forms. Basic data transfer is done later.
+
+;; Each extension below follows the same four-part scheme:
+;;   1. an anonymous insn doing the extend from memory (a load) or a
+;;      register (a mask/extend instruction);
+;;   2. a dot-form (record) variant comparing the result against 0 into a
+;;      CR field, with the result discarded into a scratch;
+;;   3. the same dot-form but keeping the extended result;
+;;   4. define_splits that, after reload, break the "#" alternatives
+;;      (CR fields other than cr0, where the record form can't be used)
+;;      into a separate extend followed by a compare.
+
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+; lbz from memory, or rldicl clearing the high 56 bits in a register.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lbz%U1%X1 %0,%1
+ rldicl %0,%1,0,56"
+ [(set_attr "type" "load,*")])
+
+; Record form: rldicl. sets cr0 ("x" constraint); other CR fields ("?y")
+; emit "#" and are split after reload.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %2,%1,0,56
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+; As above but the extended value is kept in operand 0.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %0,%1,0,56
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; Sign extension: no lba instruction exists, so only the register form.
+(define_insn "extendqidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; HI -> DI extensions; same four-part scheme as the QI -> DI patterns
+; (plain insn, record form with scratch, record form keeping the result,
+; and after-reload splits for non-cr0 CR fields).
+(define_expand "zero_extendhidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+; lhz from memory, or rldicl clearing the high 48 bits in a register.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lhz%U1%X1 %0,%1
+ rldicl %0,%1,0,48"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %2,%1,0,48
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %0,%1,0,48
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendhidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+; lha from memory, or extsh in a register.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lha%U1%X1 %0,%1
+ extsh %0,%1"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ extsh. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ extsh. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; SI -> DI extensions; same four-part scheme as the QI -> DI patterns.
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+; lwz from memory, or rldicl clearing the high 32 bits in a register.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "reg_or_mem_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lwz%U1%X1 %0,%1
+ rldicl %0,%1,0,32"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %2,%1,0,32
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ rldicl. %0,%1,0,32
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC64"
+ "")
+
+; lwa from memory (lwa_operand restricts the addressing modes lwa
+; supports), or extsw in a register.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "lwa_operand" "m,r")))]
+ "TARGET_POWERPC64"
+ "@
+ lwa%U1%X1 %0,%1
+ extsw %0,%1"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ extsw. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ extsw. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (sign_extend:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; QI -> SI extensions (32-bit).  The {old|new} braces in the templates
+; select POWER vs PowerPC mnemonics (e.g. {rlinm|rlwinm}).
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+; lbz from memory, or mask to the low byte in a register.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+; Record form via andi. (sets cr0); "#" alternative split after reload
+; for non-cr0 CR fields.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; Sign extension QI -> SI: PowerPC has extsb; POWER and plain RS/6000
+; must synthesize it with a shift-left/shift-right-arithmetic pair, so
+; the expander dispatches to one of three implementations.
+(define_expand "extendqisi2"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqisi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqisi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqisi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqisi2_ppc"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; POWER fallback: shift left 24 then arithmetic shift right 24 (POWER
+; shift patterns carry a clobber of a scratch).
+(define_expand "extendqisi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+; Generic fallback: same shift pair, without the POWER clobbers.
+(define_expand "extendqisi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+; QI -> HI extensions; structurally identical to the QI -> SI group
+; above (the same machine instructions work, only the RTL mode differs).
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lbz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xff"
+ [(set_attr "type" "load,*")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:HI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (zero_extend:HI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; Sign extension QI -> HI: dispatch on target family, as for QI -> SI.
+(define_expand "extendqihi2"
+ [(use (match_operand:HI 0 "gpc_reg_operand" ""))
+ (use (match_operand:QI 1 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWERPC)
+ emit_insn (gen_extendqihi2_ppc (operands[0], operands[1]));
+ else if (TARGET_POWER)
+ emit_insn (gen_extendqihi2_power (operands[0], operands[1]));
+ else
+ emit_insn (gen_extendqihi2_no_power (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_insn "extendqihi2_ppc"
+ [(set (match_operand:HI 0 "gpc_reg_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC"
+ "extsb %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 "=r,r"))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:HI 2 ""))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC"
+ "@
+ extsb. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:HI (match_operand:QI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (sign_extend:HI (match_dup 1)))]
+ "TARGET_POWERPC && reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:HI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+; POWER / generic fallbacks: shift pair in SImode; both operands are
+; narrowed to SImode lowparts first.
+(define_expand "extendqihi2_power"
+ [(parallel [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (clobber (scratch:SI))])
+ (parallel [(set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))
+ (clobber (scratch:SI))])]
+ "TARGET_POWER"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+(define_expand "extendqihi2_no_power"
+ [(set (match_dup 2)
+ (ashift:SI (match_operand:QI 1 "gpc_reg_operand" "")
+ (const_int 24)))
+ (set (match_operand:HI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 2)
+ (const_int 24)))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "
+{ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode); }")
+
+;; HImode -> SImode zero extension: lhz from memory, or rlwinm masking the
+;; low 16 bits register-to-register.  The {old|new} braces in output templates
+;; select POWER vs. PowerPC mnemonics.  The recording (compare-with-zero)
+;; variants use andi. and are followed by reload_completed splits that break
+;; the "#" alternative (CC target not cr0, constraint "?y") into a separate
+;; extend and compare.
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lhz%U1%X1 %0,%1
+ {rlinm|rlwinm} %0,%1,0,0xffff"
+ [(set_attr "type" "load,*")])
+
+;; Zero-extend and set CR from the result, discarding the extended value
+;; into a scratch.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {andil.|andi.} %2,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split the non-cr0 alternative after reload: do the extend, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+;; Zero-extend, keeping the result AND setting CR.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {andil.|andi.} %0,%1,0xffff
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; HImode -> SImode sign extension: lha from memory, or extsh register-to-
+;; register.  Same structure as the zero-extend family above: plain insn,
+;; recording (extsh.) variants, and reload_completed splits for the non-cr0
+;; "#" alternatives.
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_operand:HI 1 "reg_or_mem_operand" "m,r")))]
+ ""
+ "@
+ lha%U1%X1 %0,%1
+ {exts|extsh} %0,%1"
+ [(set_attr "type" "load,*")])
+
+;; Sign-extend and set CR, result discarded into a scratch.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ ""
+ "@
+ {exts.|extsh.} %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "reload_completed"
+ [(set (match_dup 2)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+;; Sign-extend, keeping the result AND setting CR.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (sign_extend:SI (match_dup 1)))]
+ ""
+ "@
+ {exts.|extsh.} %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (sign_extend:SI (match_operand:HI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (sign_extend:SI (match_dup 1)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (sign_extend:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Fixed-point arithmetic insns.
+
+;; Discourage ai/addic because of carry but provide it in an alternative
+;; allowing register zero as source.
+;; addsi3: constants that fit neither addi (I) nor addis (L) are split into
+;; a sign-adjusted low 16 bits plus the remaining high part, emitted as two
+;; adds (high part first -- see the ordering note below).
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_arith_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! add_operand (operands[2], SImode))
+ {
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ /* low = val sign-extended to its low 16 bits; rest = val - low.  */
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, SImode);
+
+ /* The ordering here is important for the prolog expander.
+ When space is allocated from the stack, adding 'low' first may
+ produce a temporary deallocation (which would be bad). */
+ emit_insn (gen_addsi3 (tmp, operands[1], GEN_INT (rest)));
+ emit_insn (gen_addsi3 (operands[0], tmp, GEN_INT (low)));
+ DONE;
+ }
+}")
+
+;; Four alternatives: add reg,reg; addi (base reg "b", so r0 means zero);
+;; addic (discouraged "?": sets carry, but accepts r0 as a true register);
+;; addis for shifted 16-bit constants (L).
+(define_insn "*addsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,?r,r")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,b,r,b")
+ (match_operand:SI 2 "add_operand" "r,I,I,L")))]
+ ""
+ "@
+ {cax|add} %0,%1,%2
+ {cal %0,%2(%1)|addi %0,%1,%2}
+ {ai|addic} %0,%1,%2
+ {cau|addis} %0,%1,%v2"
+ [(set_attr "length" "4,4,4,4")])
+
+;; Darwin (Mach-O) only: add the ha16 (high-adjusted) part of a symbol.
+(define_insn "addsi3_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "b")
+ (high:SI (match_operand 2 "" ""))))]
+ "TARGET_MACHO && !TARGET_64BIT"
+ "{cau|addis} %0,%1,ha16(%2)"
+ [(set_attr "length" "4")])
+
+;; Recording add (add. / addic.): _internal2 discards the sum into a scratch
+;; and only sets CR; _internal3 keeps the sum as well.  The "#" alternatives
+;; (CC not cr0) are broken up by the reload_completed splits that follow.
+(define_insn "*addsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {cax.|add.} %3,%1,%2
+ {ai.|addic.} %3,%1,%2
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Add, keep the sum, and set CR.
+(define_insn "*addsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:SI (match_dup 1)
+ (match_dup 2)))]
+ "! TARGET_POWERPC64"
+ "@
+ {cax.|add.} %0,%1,%2
+ {ai.|addic.} %0,%1,%2
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split an add that we can't do in one insn into two insns, each of which
+;; does one 16-bit part. This is used by combine. Note that the low-order
+;; add should be last in case the result gets used in an address.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "non_add_cint_operand" "")))]
+ ""
+ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
+"
+{
+ /* Same low/rest decomposition as the addsi3 expander: low is the
+ sign-adjusted low 16 bits, rest carries the remainder.  */
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, SImode);
+
+ operands[3] = GEN_INT (rest);
+ operands[4] = GEN_INT (low);
+}")
+
+;; One's complement, implemented as nor of the operand with itself.
+;; Recording variants (nor.) and the usual non-cr0 reload splits follow.
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (not:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ ""
+ "nor %0,%1,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ nor. %2,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (not:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+;; Complement, keep the result, and set CR.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (not:SI (match_dup 1)))]
+ "! TARGET_POWERPC64"
+ "@
+ nor. %0,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (not:SI (match_dup 1)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (not:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; SImode subtraction.  Note the reversed operand order of subf/sf:
+;; "subf %0,%2,%1" computes %1 - %2.  POWER uses sf/sfi (which set carry);
+;; PowerPC uses subf with a separate subfic alternative for an immediate
+;; minuend.  Recording forms and non-cr0 reload splits follow each pair.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (minus:SI (match_operand:SI 1 "reg_or_short_operand" "rI")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ "! TARGET_POWERPC"
+ "{sf%I1|subf%I1c} %0,%2,%1")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "reg_or_short_operand" "r,I")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWERPC"
+ "@
+ subf %0,%2,%1
+ subfic %0,%2,%1")
+
+;; Subtract and set CR, result discarded into a scratch (POWER form).
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Same, PowerPC form (subf.).
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64"
+ "@
+ subf. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (minus:SI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Subtract, keep the difference, and set CR (POWER form).
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC"
+ "@
+ {sf.|subfc.} %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Same, PowerPC form.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (minus:SI (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64"
+ "@
+ subf. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 1)
+ (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (minus:SI (match_dup 1)
+ (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; subsi3 expander: subtracting a constant becomes adding its negation,
+;; which lets the addsi3 machinery handle range splitting.
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_operand:SI 1 "reg_or_short_operand" "")
+ (match_operand:SI 2 "reg_or_arith_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_addsi3 (operands[0], operands[1],
+ negate_rtx (SImode, operands[2])));
+ DONE;
+ }
+}")
+
+;; For SMIN, SMAX, UMIN, and UMAX, we use DEFINE_EXPAND's that involve a doz[i]
+;; instruction and some auxiliary computations. Then we just have a single
+;; DEFINE_INSN for doz[i] and the define_splits to make them if made by
+;; combine.
+;; The doz value is (op1 > op2) ? 0 : op2 - op1, so:
+;;   smin = op2 - doz(op1,op2);  smax = doz(op1,op2) + op1.
+;; The unsigned versions flip the sign bit of both inputs (XOR with
+;; 0x80000000) first so the signed doz comparison orders them correctly.
+(define_expand "sminsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER"
+ "
+{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smin:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 3)))]
+ "")
+
+(define_expand "smaxsi3"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER"
+ "
+{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (smax:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "TARGET_POWER"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt:SI (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0) (plus:SI (match_dup 3) (match_dup 1)))]
+ "")
+
+;; Unsigned min: bias both inputs by 0x80000000, then use the signed recipe.
+(define_expand "uminsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (minus:SI (match_dup 2) (match_dup 3)))]
+ "TARGET_POWER"
+ "
+{
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+;; Unsigned max: same bias trick, then add doz to the original op1.
+(define_expand "umaxsi3"
+ [(set (match_dup 3) (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 4) (xor:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_dup 5)))
+ (set (match_dup 3) (if_then_else:SI (gt (match_dup 3) (match_dup 4))
+ (const_int 0)
+ (minus:SI (match_dup 4) (match_dup 3))))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 3) (match_dup 1)))]
+ "TARGET_POWER"
+ "
+{
+ operands[3] = gen_reg_rtx (SImode);
+ operands[4] = gen_reg_rtx (SImode);
+ operands[5] = GEN_INT (-2147483647 - 1);
+}")
+
+;; The POWER doz[i] ("difference or zero") instruction itself:
+;; result = (op1 > op2) ? 0 : op2 - op1.  Recording variants and the
+;; non-cr0 reload splits follow the same scheme as the other families.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2")
+
+;; doz. and set CR, result discarded into a scratch.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %3,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; doz. keeping the result and setting CR.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ doz%I2. %0,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (if_then_else:SI (gt (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (if_then_else:SI (gt (match_dup 1) (match_dup 2))
+ (const_int 0)
+ (minus:SI (match_dup 2) (match_dup 1))))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; We don't need abs with condition code because such comparisons should
+;; never be done.
+;; abssi2: POWER has a real abs insn; otherwise expand to the classic
+;; branch-free sequence  t = x >> 31 (arith);  r = x ^ t;  r = r - t.
+(define_expand "abssi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))]
+ ""
+ "
+{
+ if (! TARGET_POWER)
+ {
+ emit_insn (gen_abssi2_nopower (operands[0], operands[1]));
+ DONE;
+ }
+}")
+
+(define_insn "abssi2_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWER"
+ "abs %0,%1")
+
+;; Three-insn abs for non-POWER: srawi/xor, then subtract the sign mask
+;; (subf on PowerPC, sf/subfc in common mode).
+(define_insn "abssi2_nopower"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER"
+ "*
+{
+ return (TARGET_POWERPC)
+ ? \"{srai|srawi} %2,%1,31\;xor %0,%2,%1\;subf %0,%2,%0\"
+ : \"{srai|srawi} %2,%1,31\;xor %0,%2,%1\;{sf|subfc} %0,%2,%0\";
+}"
+ [(set_attr "length" "12")])
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (clobber (match_scratch:SI 2 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 0) (match_dup 2)))]
+ "")
+
+;; Negated absolute value: POWER nabs insn, or the same srawi/xor sequence
+;; with the final subtraction reversed (mask - value).
+(define_insn "*nabs_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "nabs %0,%1")
+
+(define_insn "*nabs_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r,r")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "! TARGET_POWER"
+ "*
+{
+ return (TARGET_POWERPC)
+ ? \"{srai|srawi} %2,%1,31\;xor %0,%2,%1\;subf %0,%0,%2\"
+ : \"{srai|srawi} %2,%1,31\;xor %0,%2,%1\;{sf|subfc} %0,%0,%2\";
+}"
+ [(set_attr "length" "12")])
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" ""))))
+ (clobber (match_scratch:SI 2 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 2) (ashiftrt:SI (match_dup 1) (const_int 31)))
+ (set (match_dup 0) (xor:SI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:SI (match_dup 2) (match_dup 0)))]
+ "")
+
+;; Negation (neg insn), with recording variants and the usual non-cr0
+;; reload splits.
(define_insn "negsi2" ;; placeholder -- see below
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ ""
+ "neg %0,%1")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ neg. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 2 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (neg:SI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (match_dup 1)))]
+ "! TARGET_POWERPC64"
+ "@
+ neg. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (neg:SI (match_dup 1)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (neg:SI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Find-first-set: isolate the lowest set bit with neg/and, count leading
+;; zeros, and subtract from 32 to get the 1-based bit index (0 if no bits).
+(define_insn "ffssi2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (ffs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))]
+ ""
+ "neg %0,%1\;and %0,%0,%1\;{cntlz|cntlzw} %0,%0\;{sfi|subfic} %0,%0,32"
+ [(set_attr "length" "16")])
+
+;; SImode multiply.  The POWER variant (mulsi3_mq) must clobber the MQ
+;; register (constraint "q"); the non-POWER variant needs no clobber.
+;; The insn type attribute distinguishes imul3 (8-bit constant), imul2
+;; (16-bit constant) and imul for scheduling.  Recording forms and their
+;; reload splits follow, again in MQ and no-MQ flavors.
+(define_expand "mulsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_short_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_mulsi3_mq (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_mulsi3_no_mq (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+(define_insn "mulsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+ (clobber (match_scratch:SI 3 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+(define_insn "mulsi3_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I")))]
+ "! TARGET_POWER"
+ "@
+ {muls|mullw} %0,%1,%2
+ {muli|mulli} %0,%1,%2"
+ [(set (attr "type")
+ (cond [(match_operand:SI 2 "s8bit_cint_operand" "")
+ (const_string "imul3")
+ (match_operand:SI 2 "short_cint_operand" "")
+ (const_string "imul2")]
+ (const_string "imul")))])
+
+;; Multiply and set CR, product discarded into a scratch (POWER: also
+;; clobbers MQ).
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Same, no MQ clobber for non-POWER.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %3,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Multiply keeping the product and setting CR (POWER/MQ flavor).
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,q"))]
+ "TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Same, no MQ clobber for non-POWER.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {muls.|mullw.} %0,%1,%2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (mult:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (mult:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (mult:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Operand 1 is divided by operand 2; quotient goes to operand
+;; 0 and remainder to operand 3.
+;; ??? At some point, see what, if anything, we can do about if (x % y == 0).
+;; On POWER this maps onto divs (remainder to the MQ-class operand 3);
+;; in common mode (neither POWER nor PowerPC) it becomes a call to the
+;; divss millicode routine with arguments/results in r3 and r4.
+
+(define_expand "divmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))])]
+ "TARGET_POWER || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+}")
+
+;; POWER divs: quotient in operand 0, remainder in operand 3 ("q" = MQ).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=q")
+ (mod:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "divs %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; Unsigned SImode divide.  PowerPC uses divwu; when both POWERPC and POWER
+;; are enabled the insn clobbers MQ (udivsi3_mq).  Common mode calls the
+;; quous millicode routine through r3/r4.
+(define_expand "udivsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")))]
+ "TARGET_POWERPC || (! TARGET_POWER && ! TARGET_POWERPC)"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quous_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_udivsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "udivsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divwu %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*udivsi3_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "divwu %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; For powers of two we can do srai/aze for divide and then adjust for
+;; modulus. If it isn't a power of two, FAIL on POWER so divmodsi4 will be
+;; used; for PowerPC, force operands into register and do a normal divide;
+;; for AIX common-mode, use quoss call on register operands.
+(define_expand "divsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ /* Positive power-of-two divisor: fall through to the srawi/addze
+ pattern below.  */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 0
+ && exact_log2 (INTVAL (operands[2])) >= 0)
+ ;
+ else if (TARGET_POWERPC)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_divsi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ }
+ else if (TARGET_POWER)
+ FAIL;
+ else
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_quoss_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+}")
+
+(define_insn "divsi3_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "divw %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+(define_insn "*divsi3_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "divw %0,%1,%2"
+ [(set_attr "type" "idiv")])
+
+;; modsi3 only handles positive power-of-two divisors, as
+;;   x % 2^i  =  x - ((x / 2^i) << i)
+;; built from divsi3 / ashlsi3 / subsi3; anything else FAILs so the
+;; generic expander falls back to divmodsi4 or a libcall.
+(define_expand "modsi3"
+ [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+ (use (match_operand:SI 1 "gpc_reg_operand" ""))
+ (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+ ""
+ "
+{
+ int i;
+ rtx temp1;
+ rtx temp2;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) < 0
+ || (i = exact_log2 (INTVAL (operands[2]))) < 0)
+ FAIL;
+
+ temp1 = gen_reg_rtx (SImode);
+ temp2 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_divsi3 (temp1, operands[1], operands[2]));
+ emit_insn (gen_ashlsi3 (temp2, temp1, GEN_INT (i)));
+ emit_insn (gen_subsi3 (operands[0], operands[1], temp2));
+ DONE;
+}")
+
+;; Signed divide by an exact power of two: srawi shifts right arithmetically
+;; (%p2 prints log2 of the constant) and addze adds back the carry so the
+;; result rounds toward zero for negative dividends.  Recording variants and
+;; the non-cr0 reload splits follow.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (div:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "exact_log2_cint_operand" "N")))]
+ ""
+ "{srai|srawi} %0,%1,%p2\;{aze|addze} %0,%0"
+ [(set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ ""
+ "@
+ {srai|srawi} %3,%1,%p2\;{aze.|addze.} %3,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "exact_log2_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "reload_completed"
+ [(set (match_dup 3)
+ (div:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (div:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "@
+ {srai|srawi} %0,%1,%p2\;{aze.|addze.} %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "exact_log2_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (div:SI (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (div:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; POWER div: divides the 64-bit value (op1:MQ) by op3; quotient to op0,
+;; remainder left in MQ (operand 2, constraint "*q"; operand 4 is tied to
+;; the same MQ register by matching constraint "2").
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (udiv:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 4 "register_operand" "2")))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (set (match_operand:SI 2 "register_operand" "=*q")
+ (umod:SI
+ (plus:DI (ashift:DI
+ (zero_extend:DI (match_dup 1)) (const_int 32))
+ (zero_extend:DI (match_dup 4)))
+ (match_dup 3)))]
+ "TARGET_POWER"
+ "div %0,%1,%3"
+ [(set_attr "type" "idiv")])
+
+;; To do unsigned divide we handle the cases of the divisor looking like a
+;; negative number. If it is a constant that is less than 2**31, we don't
+;; have to worry about the branches. So make a few subroutines here.
+;;
+;; First comes the normal case.
+;; Zero the high word (operand 4) and issue the POWER 64/32 div insn above;
+;; correct only when the divisor's sign bit is known clear or the branchy
+;; fixup below has already run.
+(define_expand "udivmodsi4_normal"
+ [(set (match_dup 4) (const_int 0))
+ (parallel [(set (match_operand:SI 0 "" "")
+ (udiv:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_operand:SI 1 "" "")))
+ (match_operand:SI 2 "" "")))
+ (set (match_operand:SI 3 "" "")
+ (umod:SI (plus:DI (ashift:DI (zero_extend:DI (match_dup 4))
+ (const_int 32))
+ (zero_extend:DI (match_dup 1)))
+ (match_dup 2)))])]
+ "TARGET_POWER"
+ "
+{ operands[4] = gen_reg_rtx (SImode); }")
+
+;; This handles the branches.
+;; Precompute quotient 0/1 and remainder for the cases where the divisor
+;; compares "negative" (high bit set) as an unsigned 32-bit value, branching
+;; to label operand 4 when the full divide is not needed.
+(define_expand "udivmodsi4_tests"
+ [(set (match_operand:SI 0 "" "") (const_int 0))
+ (set (match_operand:SI 3 "" "") (match_operand:SI 1 "" ""))
+ (set (match_dup 5) (compare:CCUNS (match_dup 1) (match_operand:SI 2 "" "")))
+ (set (pc) (if_then_else (ltu (match_dup 5) (const_int 0))
+ (label_ref (match_operand:SI 4 "" "")) (pc)))
+ (set (match_dup 0) (const_int 1))
+ (set (match_dup 3) (minus:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 6) (compare:CC (match_dup 2) (const_int 0)))
+ (set (pc) (if_then_else (lt (match_dup 6) (const_int 0))
+ (label_ref (match_dup 4)) (pc)))]
+ "TARGET_POWER"
+ "
+{ operands[5] = gen_reg_rtx (CCUNSmode);
+ operands[6] = gen_reg_rtx (CCmode);
+}")
+
+;; Top-level unsigned divide+modulus expander.  POWER targets stitch
+;; together the two helper expanders above; pre-PowerPC common-mode code
+;; calls the __divus millicode routine; PowerPC targets FAIL so the
+;; separate udiv/umod patterns are used instead.
+(define_expand "udivmodsi4"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (udiv:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))])]
+ ""
+ "
+{
+ rtx label = 0;
+
+ if (! TARGET_POWER)
+ {
+ if (! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_divus_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, 4));
+ DONE;
+ }
+ else
+ FAIL;
+ }
+
+ if (GET_CODE (operands[2]) != CONST_INT || INTVAL (operands[2]) < 0)
+ {
+ operands[2] = force_reg (SImode, operands[2]);
+ label = gen_label_rtx ();
+ emit (gen_udivmodsi4_tests (operands[0], operands[1], operands[2],
+ operands[3], label));
+ }
+ else
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit (gen_udivmodsi4_normal (operands[0], operands[1], operands[2],
+ operands[3]));
+ if (label)
+ emit_label (label);
+
+ DONE;
+}")
+
+;; AIX architecture-independent common-mode multiply (DImode),
+;; divide/modulus, and quotient subroutine calls. Input operands in R3 and
+;; R4; results in R3 and sometimes R4; link register always clobbered by bla
+;; instruction; R0 sometimes clobbered; also, MQ sometimes clobbered but
+;; assumed unused if generating common-mode, so ignore.
+
+;; High 32 bits of the signed 64-bit product of r3 * r4 -> r3.
+(define_insn "mulh_call"
+ [(set (reg:SI 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4)))
+ (const_int 32))))
+ (clobber (match_scratch:SI 0 "=l"))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mulh"
+ [(set_attr "type" "imul")])
+
+;; Full signed 64-bit product of r3 * r4 -> r3:r4 (DImode in reg 3).
+(define_insn "mull_call"
+ [(set (reg:DI 3)
+ (mult:DI (sign_extend:DI (reg:SI 3))
+ (sign_extend:DI (reg:SI 4))))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __mull"
+ [(set_attr "type" "imul")])
+
+;; Signed divide+modulus: quotient -> r3, remainder -> r4.
+(define_insn "divss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (mod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divss"
+ [(set_attr "type" "idiv")])
+
+;; Unsigned divide+modulus: quotient -> r3, remainder -> r4.  Also
+;; clobbers a CC scratch and condition-register field 69.
+(define_insn "divus_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (set (reg:SI 4)
+ (umod:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 1 "=x"))
+ (clobber (reg:CC 69))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __divus"
+ [(set_attr "type" "idiv")])
+
+;; Signed quotient only: r3 / r4 -> r3.
+(define_insn "quoss_call"
+ [(set (reg:SI 3)
+ (div:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quoss"
+ [(set_attr "type" "idiv")])
+
+;; Unsigned quotient only: r3 / r4 -> r3.
+(define_insn "quous_call"
+ [(set (reg:SI 3)
+ (udiv:SI (reg:SI 3) (reg:SI 4)))
+ (clobber (match_scratch:SI 0 "=l"))
+ (clobber (reg:SI 0))
+ (clobber (match_scratch:CC 1 "=x"))
+ (clobber (reg:CC 69))]
+ "! TARGET_POWER && ! TARGET_POWERPC"
+ "bla __quous"
+ [(set_attr "type" "idiv")])
+
+;; Logical instructions
+;; The logical instructions are mostly combined by using match_operator,
+;; but the plain AND insns are somewhat different because there is no
+;; plain 'andi' (only 'andi.'), no plain 'andis', and there are all
+;; those rotate-and-mask operations. Thus, the AND insns come first.
+
+;; AND: register form, rlwinm mask form (T), and the record-form
+;; immediates andi./andis. (K/L), which force a cr0 clobber.
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:SI 2 "and_operand" "?r,T,K,L")))
+ (clobber (match_scratch:CC 3 "=X,X,x,x"))]
+ ""
+ "@
+ and %0,%1,%2
+ {rlinm|rlwinm} %0,%1,0,%m2,%M2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2")
+
+;; Note to set cr's other than cr0 we do the and immediate and then
+;; the test again -- this avoids a mcrf which on the higher end
+;; machines causes an execution serialization
+
+;; AND setting only CC.  Alternatives 5-8 target a non-cr0 field, emit
+;; "#" and are broken up by the split that follows.
+(define_insn "*andsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "! TARGET_POWERPC64"
+ "@
+ and. %3,%1,%2
+ {andil.|andi.} %3,%1,%b2
+ {andiu.|andis.} %3,%1,%u2
+ {rlinm.|rlwinm.} %3,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+;; Split the non-cr0 CC-only AND into AND-into-scratch plus a compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "and_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; AND setting both the result register and CC.
+(define_insn "*andsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,??y,??y,?y")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "and_operand" "r,K,L,T,r,K,L,T"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,x,x,X"))]
+ "! TARGET_POWERPC64"
+ "@
+ and. %0,%1,%2
+ {andil.|andi.} %0,%1,%b2
+ {andiu.|andis.} %0,%1,%u2
+ {rlinm.|rlwinm.} %0,%1,0,%m2,%M2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,compare,compare,delayed_compare,compare,compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+;; Split for the non-cr0 alternatives of the insn above.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "and_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; OR expander.  A constant that is not a valid single logical immediate
+;; is applied in two steps: high 16 bits (oris) then low 16 bits (ori).
+(define_expand "iorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_iorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_iorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+;; XOR expander; same two-step constant decomposition as iorsi3.
+(define_expand "xorsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (xor:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_logical_cint_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! logical_operand (operands[2], SImode))
+ {
+ HOST_WIDE_INT value = INTVAL (operands[2]);
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (SImode));
+
+ emit_insn (gen_xorsi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ emit_insn (gen_xorsi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+;; OR/XOR recognizer: register, low-immediate, and shifted-immediate forms.
+;; %q3 prints the mnemonic for the matched boolean operator.
+(define_insn "*boolsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:SI 2 "logical_operand" "r,K,L")]))]
+ ""
+ "@
+ %q3 %0,%1,%2
+ {%q3il|%q3i} %0,%1,%b2
+ {%q3iu|%q3is} %0,%1,%u2")
+
+;; Boolean op setting only CC; non-cr0 alternative is split below.
+(define_insn "*boolsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Boolean op setting both the result register and CC.
+(define_insn "*boolsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; NOTE(review): this split's operand 3 uses "cc_reg_operand" while the
+;; sibling splits use "cc_reg_not_cr0_operand" -- confirm intentional.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split an logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operator:SI 3 "boolean_or_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "non_logical_cint_operand" "")]))]
+ ""
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i;
+ i = GEN_INT (INTVAL (operands[2]) & (~ (HOST_WIDE_INT) 0xffff));
+ operands[4] = gen_rtx (GET_CODE (operands[3]), SImode,
+ operands[1], i);
+ i = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ operands[5] = gen_rtx (GET_CODE (operands[3]), SImode,
+ operands[0], i);
+}")
+
+;; Boolean op with one complemented input (andc/orc family); note the
+;; operand order in the template: %0,%2,%1.
+(define_insn "*boolcsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r")]))]
+ ""
+ "%q3 %0,%2,%1")
+
+;; Complemented-input boolean op setting only CC.
+(define_insn "*boolcsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Complemented-input boolean op setting both result and CC.
+(define_insn "*boolcsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (match_operand:SI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Boolean op with both inputs complemented (nand/nor family by De Morgan).
+(define_insn "*boolccsi3_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 3 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))]))]
+ ""
+ "%q3 %0,%1,%2")
+
+;; Both-complemented boolean op setting only CC.
+(define_insn "*boolccsi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Both-complemented boolean op setting both result and CC.
+(define_insn "*boolccsi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r"))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "! TARGET_POWERPC64"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:SI 4 "boolean_operator"
+ [(not:SI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; maskir insn. We need four forms because things might be in arbitrary
+;; orders. Don't define forms that only set CR fields because these
+;; would modify an input register.
+;; Computes (op1 & ~op2) | (op3 & op2): merge op3 into op1 under mask op2.
+;; Operand 1 is tied to operand 0 ("0" constraint) since maskir modifies
+;; its target in place.  POWER only.
+
+(define_insn "*maskir_internal1"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+;; Same, with the operands of the second AND commuted.
+(define_insn "*maskir_internal2"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_dup 2))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+;; Same, with the two IOR arms swapped.
+(define_insn "*maskir_internal3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand:SI 3 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+;; Same, arms swapped and first AND commuted.
+(define_insn "*maskir_internal4"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0"))))]
+ "TARGET_POWER"
+ "maskir %0,%3,%2")
+
+;; Record forms: maskir. sets CC as well; the non-cr0 alternative is "#"
+;; and is broken up by the paired split that follows each insn.
+(define_insn "*maskir_internal5"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_dup 2)
+ (match_operand:SI 3 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 2) (match_dup 3))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0"))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (not:SI (match_operand:SI 2 "gpc_reg_operand" ""))
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_dup 2)))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (not:SI (match_dup 2)) (match_dup 1))
+ (and:SI (match_dup 3) (match_dup 2))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal7"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "r,r")
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 2 "gpc_reg_operand" "")
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 2) (match_dup 3))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*maskir_internal8"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "0,0")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER"
+ "@
+ maskir. %0,%3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ior:SI (and:SI (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (and:SI (not:SI (match_dup 2))
+ (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ior:SI (and:SI (match_dup 3) (match_dup 2))
+ (and:SI (not:SI (match_dup 2)) (match_dup 1))))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate and shift insns, in all their variants. These support shifts,
+;; field inserts and extracts, and various combinations thereof.
+;; Bit-field insert expander: dispatches to insvdi or insvsi by mode.
+(define_expand "insv"
+ [(set (zero_extract (match_operand 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand 3 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[0]) == DImode)
+ emit_insn (gen_insvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_insvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+;; 32-bit field insert via rlwimi.  Operand 1 is the field width, operand 2
+;; the (big-endian) start bit; the C fragment converts them to the rotate
+;; count and mask-end bit that rlwimi expects.
+(define_insn "insvsi"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:SI 3 "gpc_reg_operand" "r"))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}")
+
+;; Insert of a left-shifted source: fold the shift into the rlwimi
+;; rotate count.  Condition checks the shifted-in field still fits.
+(define_insn "*insvsi_internal1"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashift:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}")
+
+;; Insert of an arithmetic-right-shifted source.
+(define_insn "*insvsi_internal2"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (ashiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}")
+
+;; Insert of a logical-right-shifted source.
+(define_insn "*insvsi_internal3"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (lshiftrt:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")))]
+ "(32 - (INTVAL (operands[4]) & 31)) >= INTVAL (operands[1])"
+ "*
+{
+ int shift = INTVAL (operands[4]) & 31;
+ int start = INTVAL (operands[2]) & 31;
+ int size = INTVAL (operands[1]) & 31;
+
+ operands[4] = GEN_INT (32 - shift - start - size);
+ operands[1] = GEN_INT (start + size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h4,%h2,%h1\";
+}")
+
+;; Insert of an extracted field: a single rlwimi rotates the extracted
+;; bits into position, provided the extract is at least as wide.
+(define_insn "*insvsi_internal4"
+ [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (zero_extract:SI (match_operand:SI 3 "gpc_reg_operand" "r")
+ (match_operand:SI 4 "const_int_operand" "i")
+ (match_operand:SI 5 "const_int_operand" "i")))]
+ "INTVAL (operands[4]) >= INTVAL (operands[1])"
+ "*
+{
+ int extract_start = INTVAL (operands[5]) & 31;
+ int extract_size = INTVAL (operands[4]) & 31;
+ int insert_start = INTVAL (operands[2]) & 31;
+ int insert_size = INTVAL (operands[1]) & 31;
+
+/* Align extract field with insert field */
+ operands[5] = GEN_INT (extract_start + extract_size - insert_start - insert_size);
+ operands[1] = GEN_INT (insert_start + insert_size - 1);
+ return \"{rlimi|rlwimi} %0,%3,%h5,%h2,%h1\";
+}")
+
+;; 64-bit field insert via rldimi.
+(define_insn "insvdi"
+ [(set (zero_extract:DI (match_operand:DI 0 "gpc_reg_operand" "+r")
+ (match_operand:SI 1 "const_int_operand" "i")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "gpc_reg_operand" "r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[2]) & 63;
+ int size = INTVAL (operands[1]) & 63;
+
+ operands[1] = GEN_INT (64 - start - size);
+ return \"rldimi %0,%3,%H1,%H2\";
+}")
+
+;; Zero-extract expander: dispatches to extzvdi or extzvsi by mode.
+(define_expand "extzv"
+ [(set (match_operand 0 "gpc_reg_operand" "")
+ (zero_extract (match_operand 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+ "
+{
+ /* Do not handle 16/8 bit structures that fit in HI/QI modes directly, since
+ the (SUBREG:SI (REG:HI xxx)) that is otherwise generated can confuse the
+ compiler if the address of the structure is taken later. */
+ if (GET_CODE (operands[0]) == SUBREG
+ && (GET_MODE_SIZE (GET_MODE (SUBREG_REG (operands[0]))) < UNITS_PER_WORD))
+ FAIL;
+
+ if (TARGET_POWERPC64 && GET_MODE (operands[1]) == DImode)
+ emit_insn (gen_extzvdi (operands[0], operands[1], operands[2], operands[3]));
+ else
+ emit_insn (gen_extzvsi (operands[0], operands[1], operands[2], operands[3]));
+ DONE;
+}")
+
+;; 32-bit zero-extract via rlwinm: rotate the field to the low bits and
+;; mask.  A rotate of start+size that reaches 32 wraps to 0.
+(define_insn "extzvsi"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm|rlwinm} %0,%1,%3,%s2,31\";
+}")
+
+;; Zero-extract tested against zero, result discarded into a scratch.
+;; When the field fits in one halfword (and doesn't start at bit 0) an
+;; andi./andis. is used instead of rlwinm. -- see comment in the body.
+(define_insn "*extzvsi_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=r,r"))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ /* Force split for non-cc0 compare. */
+ if (which_alternative == 1)
+ return \"#\";
+
+ /* If the bitfield being tested fits in the upper or lower half of a
+ word, it is possible to use andiu. or andil. to test it. This is
+ useful because the condition register set-use delay is smaller for
+ andi[ul]. than for rlinm. This doesn't work when the starting bit
+ position is 0 because the LT and GT bits may be set wrong. */
+
+ if ((start > 0 && start + size <= 16) || start >= 16)
+ {
+ operands[3] = GEN_INT (((1 << (16 - (start & 15)))
+ - (1 << (16 - (start & 15) - size))));
+ if (start < 16)
+ return \"{andiu.|andis.} %4,%1,%3\";
+ else
+ return \"{andil.|andi.} %4,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %4,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split the non-cr0 alternative into extract plus explicit compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (zero_extract:SI (match_dup 1) (match_dup 2)
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Zero-extract setting both the result register and CC.  The andi. form
+;; is only usable when the field ends at bit 31 (result value must equal
+;; the extract, not just its zero/sign status).
+(define_insn "*extzvsi_internal2"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i")
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ int start = INTVAL (operands[3]) & 31;
+ int size = INTVAL (operands[2]) & 31;
+
+ /* Force split for non-cc0 compare. */
+ if (which_alternative == 1)
+ return \"#\";
+
+ if (start >= 16 && start + size == 32)
+ {
+ operands[3] = GEN_INT ((1 << (32 - start)) - 1);
+ return \"{andil.|andi.} %0,%1,%3\";
+ }
+
+ if (start + size >= 32)
+ operands[3] = const0_rtx;
+ else
+ operands[3] = GEN_INT (start + size);
+ return \"{rlinm.|rlwinm.} %0,%1,%3,%s2,31\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for the non-cr0 alternative of the insn above.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extract:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extract:SI (match_dup 1) (match_dup 2) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit bit-field extract (PowerPC64 only): rotate the field down with
+;; rldicl and clear the high bits.  Operand 3 becomes the rotate count
+;; (start+size mod 64) and operand 2 the clear count (64-size).
+(define_insn "extzvdi"
+  [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+	(zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+			 (match_operand:SI 2 "const_int_operand" "i")
+			 (match_operand:SI 3 "const_int_operand" "i")))]
+  "TARGET_POWERPC64"
+  "*
+{
+  int start = INTVAL (operands[3]) & 63;
+  int size = INTVAL (operands[2]) & 63;
+
+  if (start + size >= 64)
+    operands[3] = const0_rtx;
+  else
+    operands[3] = GEN_INT (start + size);
+  operands[2] = GEN_INT (64 - size);
+  return \"rldicl %0,%1,%3,%2\";
+}")
+
+;; Compare-only form: extract into a scratch with rldicl. and set CR0.
+;; Constraint "=x" restricts this to CR0, the only CR field the record
+;; form can set, so no non-CR0 split is needed here.
+(define_insn "*extzvdi_internal1"
+  [(set (match_operand:CC 0 "gpc_reg_operand" "=x")
+	(compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+			 (match_operand:SI 2 "const_int_operand" "i")
+			 (match_operand:SI 3 "const_int_operand" "i"))
+		    (const_int 0)))
+   (clobber (match_scratch:DI 4 "=r"))]
+  "TARGET_POWERPC64"
+  "*
+{
+  int start = INTVAL (operands[3]) & 63;
+  int size = INTVAL (operands[2]) & 63;
+
+  if (start + size >= 64)
+    operands[3] = const0_rtx;
+  else
+    operands[3] = GEN_INT (start + size);
+  operands[2] = GEN_INT (64 - size);
+  return \"rldicl. %4,%1,%3,%2\";
+}")
+
+;; Record form: extract into operand 0 and set CR0 with one rldicl.
+(define_insn "*extzvdi_internal2"
+  [(set (match_operand:CC 4 "gpc_reg_operand" "=x")
+	(compare:CC (zero_extract:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+			 (match_operand:SI 2 "const_int_operand" "i")
+			 (match_operand:SI 3 "const_int_operand" "i"))
+		    (const_int 0)))
+   (set (match_operand:DI 0 "gpc_reg_operand" "=r")
+	(zero_extract:DI (match_dup 1) (match_dup 2) (match_dup 3)))]
+  "TARGET_POWERPC64"
+  "*
+{
+  int start = INTVAL (operands[3]) & 63;
+  int size = INTVAL (operands[2]) & 63;
+
+  if (start + size >= 64)
+    operands[3] = const0_rtx;
+  else
+    operands[3] = GEN_INT (start + size);
+  operands[2] = GEN_INT (64 - size);
+  return \"rldicl. %0,%1,%3,%2\";
+}")
+
+;; 32-bit rotate-left patterns.  All of these map onto the rlwnm/rlwinm
+;; family (old mnemonics rlnm/rlinm in braces); the variants below fold an
+;; AND mask or a QI/HI truncation into the rotate's mask field, and each
+;; record (dot) form comes with a define_split so a compare targeting a CR
+;; field other than CR0 can be done as rotate-then-compare after reload.
+(define_insn "rotlsi3"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		   (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+  ""
+  "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xffffffff")
+
+;; Compare-only record form: rotate into a scratch, set the CR.  The ?y
+;; alternative (non-CR0) must split, hence length 8 and the "#" template.
+(define_insn "*rotlsi3_internal2"
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  "! TARGET_POWERPC64"
+  "@
+   {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xffffffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 3)
+	(rotate:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+;; Record form that also keeps the rotated result in operand 0.
+(define_insn "*rotlsi3_internal3"
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(rotate:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWERPC64"
+  "@
+   {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xffffffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(rotate:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 0)
+	(rotate:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Rotate combined with an AND of a contiguous mask (mask_operand "T"):
+;; rlwinm/rlwnm applies the mask for free via its MB/ME fields (%m3/%M3).
+(define_insn "*rotlsi3_internal4"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(and:SI (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+			   (match_operand:SI 2 "reg_or_cint_operand" "ri"))
+		(match_operand:SI 3 "mask_operand" "T")))]
+  ""
+  "{rl%I2nm|rlw%I2nm} %0,%1,%h2,%m3,%M3")
+
+(define_insn "*rotlsi3_internal5"
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC (and:SI
+		     (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				(match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		     (match_operand:SI 3 "mask_operand" "T,T"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 4 "=r,r"))]
+  "! TARGET_POWERPC64"
+  "@
+   {rl%I2nm.|rlw%I2nm.} %4,%1,%h2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (and:SI
+		     (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				(match_operand:SI 2 "reg_or_cint_operand" ""))
+		     (match_operand:SI 3 "mask_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 4 ""))]
+  "! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 4)
+	(and:SI (rotate:SI (match_dup 1)
+			   (match_dup 2))
+		(match_dup 3)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 4)
+		    (const_int 0)))]
+  "")
+
+(define_insn "*rotlsi3_internal6"
+  [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+	(compare:CC (and:SI
+		     (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				(match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		     (match_operand:SI 3 "mask_operand" "T,T"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64"
+  "@
+   {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+	(compare:CC (and:SI
+		     (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				(match_operand:SI 2 "reg_or_cint_operand" ""))
+		     (match_operand:SI 3 "mask_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 0)
+	(and:SI (rotate:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+   (set (match_dup 4)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Rotate whose result is truncated to a byte (zero_extend of a QI subreg):
+;; implemented as a rotate with a 0xff mask.
+(define_insn "*rotlsi3_internal7"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(zero_extend:SI
+	 (subreg:QI
+	  (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		     (match_operand:SI 2 "reg_or_cint_operand" "ri")) 0)))]
+  ""
+  "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xff")
+
+(define_insn "*rotlsi3_internal8"
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC (zero_extend:SI
+		     (subreg:QI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  ""
+  "@
+   {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (zero_extend:SI
+		     (subreg:QI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "reload_completed"
+  [(set (match_dup 3)
+	(zero_extend:SI (subreg:QI
+		      (rotate:SI (match_dup 1)
+				 (match_dup 2)) 0)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn "*rotlsi3_internal9"
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC (zero_extend:SI
+		     (subreg:QI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+  ""
+  "@
+   {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (zero_extend:SI
+		     (subreg:QI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+  "reload_completed"
+  [(set (match_dup 0)
+	(zero_extend:SI (subreg:QI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Same as internal7-9 but for a halfword truncation (HI subreg, 0xffff mask).
+(define_insn "*rotlsi3_internal10"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(zero_extend:SI
+	 (subreg:HI
+	  (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		     (match_operand:SI 2 "reg_or_cint_operand" "ri")) 0)))]
+  ""
+  "{rl%I2nm|rlw%I2nm} %0,%1,%h2,0xffff")
+
+(define_insn "*rotlsi3_internal11"
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC (zero_extend:SI
+		     (subreg:HI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  ""
+  "@
+   {rl%I2nm.|rlw%I2nm.} %3,%1,%h2,0xffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (zero_extend:SI
+		     (subreg:HI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "reload_completed"
+  [(set (match_dup 3)
+	(zero_extend:SI (subreg:HI
+		      (rotate:SI (match_dup 1)
+				 (match_dup 2)) 0)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn "*rotlsi3_internal12"
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC (zero_extend:SI
+		     (subreg:HI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "ri,ri")) 0))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+  ""
+  "@
+   {rl%I2nm.|rlw%I2nm.} %0,%1,%h2,0xffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (zero_extend:SI
+		     (subreg:HI
+		      (rotate:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" "")) 0))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))]
+  "reload_completed"
+  [(set (match_dup 0)
+	(zero_extend:SI (subreg:HI (rotate:SI (match_dup 1) (match_dup 2)) 0)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Note that we use "sle." instead of "sl." so that we can set
+;; SHIFT_COUNT_TRUNCATED.
+
+;; 32-bit left shift.  The expander dispatches to a POWER pattern (sle,
+;; which clobbers a "q" scratch — presumably the MQ register, per the
+;; rs6000 constraint convention) or a PowerPC pattern (slw), since the
+;; two ISAs differ in shift semantics and register usage.
+(define_expand "ashlsi3"
+  [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+   (use (match_operand:SI 1 "gpc_reg_operand" ""))
+   (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+  ""
+  "
+{
+  if (TARGET_POWER)
+    emit_insn (gen_ashlsi3_power (operands[0], operands[1], operands[2]));
+  else
+    emit_insn (gen_ashlsi3_no_power (operands[0], operands[1], operands[2]));
+  DONE;
+}")
+
+;; POWER form: register count uses sle (needs the "q" scratch); constant
+;; count uses sli/slwi and needs no scratch ("X").
+(define_insn "ashlsi3_power"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+		   (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+   (clobber (match_scratch:SI 3 "=q,X"))]
+  "TARGET_POWER"
+  "@
+   sle %0,%1,%2
+   {sli|slwi} %0,%1,%h2")
+
+(define_insn "ashlsi3_no_power"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		   (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+  "! TARGET_POWER"
+  "{sl|slw}%I2 %0,%1,%h2")
+
+;; Compare-only record forms of the left shift; non-CR0 alternatives split.
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r,r,r"))
+   (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+  "TARGET_POWER"
+  "@
+   sle. %3,%1,%2
+   {sli.|slwi.} %3,%1,%h2
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,8,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))
+   (clobber (match_scratch:SI 4 ""))]
+  "TARGET_POWER && reload_completed"
+  [(parallel [(set (match_dup 3)
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_dup 4))])
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  "! TARGET_POWER && ! TARGET_POWERPC64"
+  "@
+   {sl|slw}%I2. %3,%1,%h2
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "! TARGET_POWER && ! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 3)
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+;; Record forms that also deliver the shifted value in operand 0.
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+  "TARGET_POWER"
+  "@
+   sle. %0,%1,%2
+   {sli.|slwi.} %0,%1,%h2
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,8,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_scratch:SI 4 ""))]
+  "TARGET_POWER && reload_completed"
+  [(parallel [(set (match_dup 0)
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_dup 4))])
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			       (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(ashift:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWER && ! TARGET_POWERPC64"
+  "@
+   {sl|slw}%I2. %0,%1,%h2
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			       (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(ashift:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWER && ! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 0)
+	(ashift:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Left shift combined with an AND mask, when includes_lshift_p says the
+;; mask is compatible with the shift: a single rlwinm does both.
+(define_insn ""
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+			   (match_operand:SI 2 "const_int_operand" "i"))
+		(match_operand:SI 3 "mask_operand" "T")))]
+  "includes_lshift_p (operands[2], operands[3])"
+  "{rlinm|rlwinm} %0,%1,%h2,%m3,%M3")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			    (match_operand:SI 2 "const_int_operand" "i,i"))
+		 (match_operand:SI 3 "mask_operand" "T,T"))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 4 "=r,r"))]
+  "! TARGET_POWERPC64 && includes_lshift_p (operands[2], operands[3])"
+  "@
+   {rlinm.|rlwinm.} %4,%1,%h2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			    (match_operand:SI 2 "const_int_operand" ""))
+		 (match_operand:SI 3 "mask_operand" ""))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 4 ""))]
+  "! TARGET_POWERPC64 && includes_lshift_p (operands[2], operands[3]) && reload_completed"
+  [(set (match_dup 4)
+	(and:SI (ashift:SI (match_dup 1) (match_dup 2))
+		(match_dup 3)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 4)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			    (match_operand:SI 2 "const_int_operand" "i,i"))
+		 (match_operand:SI 3 "mask_operand" "T,T"))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64 && includes_lshift_p (operands[2], operands[3])"
+  "@
+   {rlinm.|rlwinm.} %0,%1,%h2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (and:SI (ashift:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			    (match_operand:SI 2 "const_int_operand" ""))
+		 (match_operand:SI 3 "mask_operand" ""))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64 && includes_lshift_p (operands[2], operands[3]) && reload_completed"
+  [(set (match_dup 0)
+	(and:SI (ashift:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+   (set (match_dup 4)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; The AIX assembler mis-handles "sri x,x,0", so write that case as
+;; "sli x,x,0".
+;; 32-bit logical right shift.  As with ashlsi3, the expander picks the
+;; POWER (sre, "q" scratch) or PowerPC (srw) pattern.  The "O" (zero)
+;; constraint alternative emits a plain mr to work around the AIX
+;; assembler bug noted above.
+(define_expand "lshrsi3"
+  [(use (match_operand:SI 0 "gpc_reg_operand" ""))
+   (use (match_operand:SI 1 "gpc_reg_operand" ""))
+   (use (match_operand:SI 2 "reg_or_cint_operand" ""))]
+  ""
+  "
+{
+  if (TARGET_POWER)
+    emit_insn (gen_lshrsi3_power (operands[0], operands[1], operands[2]));
+  else
+    emit_insn (gen_lshrsi3_no_power (operands[0], operands[1], operands[2]));
+  DONE;
+}")
+
+(define_insn "lshrsi3_power"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r")
+	(lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r")
+		     (match_operand:SI 2 "reg_or_cint_operand" "r,O,i")))
+   (clobber (match_scratch:SI 3 "=q,X,X"))]
+  "TARGET_POWER"
+  "@
+  sre %0,%1,%2
+  mr %0,%1
+  {s%A2i|s%A2wi} %0,%1,%h2")
+
+(define_insn "lshrsi3_no_power"
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+		     (match_operand:SI 2 "reg_or_cint_operand" "O,ri")))]
+  "! TARGET_POWER"
+  "@
+  mr %0,%1
+  {sr|srw}%I2 %0,%1,%h2")
+
+;; Compare-only record forms; non-CR0 alternatives must split (length 8).
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,X,r,r,X,r"))
+   (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+  "TARGET_POWER"
+  "@
+   sre. %3,%1,%2
+   mr. %1,%1
+   {s%A2i.|s%A2wi.} %3,%1,%h2
+   #
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))
+   (clobber (match_scratch:SI 4 ""))]
+  "TARGET_POWER && reload_completed"
+  [(parallel [(set (match_dup 3)
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_dup 4))])
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "O,ri,O,ri"))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 "=X,r,X,r"))]
+  "! TARGET_POWER && ! TARGET_POWERPC64"
+  "@
+   mr. %1,%1
+   {sr|srw}%I2. %3,%1,%h2
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,8,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "! TARGET_POWER && ! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 3)
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+;; Record forms that also keep the shifted value in operand 0.
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,?y,?y,?y")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r,r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "r,O,i,r,O,i"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r")
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_scratch:SI 4 "=q,X,X,q,X,X"))]
+  "TARGET_POWER"
+  "@
+   sre. %0,%1,%2
+   mr. %0,%1
+   {s%A2i.|s%A2wi.} %0,%1,%h2
+   #
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,4,8,8,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_scratch:SI 4 ""))]
+  "TARGET_POWER && reload_completed"
+  [(parallel [(set (match_dup 0)
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (clobber (match_dup 4))])
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+				 (match_operand:SI 2 "reg_or_cint_operand" "O,ri,O,ri"))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWER && ! TARGET_POWERPC64"
+  "@
+   mr. %0,%1
+   {sr|srw}%I2. %0,%1,%h2
+   #
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,4,8,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+				 (match_operand:SI 2 "reg_or_cint_operand" ""))
+		    (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))]
+  "! TARGET_POWER && ! TARGET_POWERPC64 && reload_completed"
+  [(set (match_dup 0)
+	(lshiftrt:SI (match_dup 1) (match_dup 2)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Logical right shift combined with an AND mask, when includes_rshift_p
+;; says the mask is compatible: a single rlwinm (right-rotate by %s2) does
+;; both the shift and the masking.
+(define_insn ""
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+			     (match_operand:SI 2 "const_int_operand" "i"))
+		(match_operand:SI 3 "mask_operand" "T")))]
+  "includes_rshift_p (operands[2], operands[3])"
+  "{rlinm|rlwinm} %0,%1,%s2,%m3,%M3")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			      (match_operand:SI 2 "const_int_operand" "i,i"))
+		 (match_operand:SI 3 "mask_operand" "T,T"))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 4 "=r,r"))]
+  "! TARGET_POWERPC64 && includes_rshift_p (operands[2], operands[3])"
+  "@
+   {rlinm.|rlwinm.} %4,%1,%s2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			      (match_operand:SI 2 "const_int_operand" ""))
+		 (match_operand:SI 3 "mask_operand" ""))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 4 ""))]
+  "! TARGET_POWERPC64 && includes_rshift_p (operands[2], operands[3]) && reload_completed"
+  [(set (match_dup 4)
+	(and:SI (lshiftrt:SI (match_dup 1) (match_dup 2))
+		(match_dup 3)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 4)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			      (match_operand:SI 2 "const_int_operand" "i,i"))
+		 (match_operand:SI 3 "mask_operand" "T,T"))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64 && includes_rshift_p (operands[2], operands[3])"
+  "@
+   {rlinm.|rlwinm.} %0,%1,%s2,%m3,%M3
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (and:SI (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			      (match_operand:SI 2 "const_int_operand" ""))
+		 (match_operand:SI 3 "mask_operand" ""))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+  "! TARGET_POWERPC64 && includes_rshift_p (operands[2], operands[3]) && reload_completed"
+  [(set (match_dup 0)
+	(and:SI (lshiftrt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+   (set (match_dup 4)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Logical right shift truncated to a byte (zero_extend of a QI subreg):
+;; when includes_rshift_p allows it, a single rlwinm with a 0xff mask does
+;; the shift and the truncation together.
+(define_insn ""
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(zero_extend:SI
+	 (subreg:QI
+	  (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		       (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (255))"
+  "{rlinm|rlwinm} %0,%1,%s2,0xff")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:QI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			(match_operand:SI 2 "const_int_operand" "i,i")) 0))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  "includes_rshift_p (operands[2], GEN_INT (255))"
+  "@
+   {rlinm.|rlwinm.} %3,%1,%s2,0xff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:QI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			(match_operand:SI 2 "const_int_operand" "")) 0))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+  [(set (match_dup 3)
+	(zero_extend:SI (subreg:QI
+	   (lshiftrt:SI (match_dup 1)
+			(match_dup 2)) 0)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:QI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			(match_operand:SI 2 "const_int_operand" "i,i")) 0))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (255))"
+  "@
+   {rlinm.|rlwinm.} %0,%1,%s2,0xff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:QI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			(match_operand:SI 2 "const_int_operand" "")) 0))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (255)) && reload_completed"
+  [(set (match_dup 0)
+	(zero_extend:SI (subreg:QI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; Same as the QI group above, but for a halfword truncation (HI subreg,
+;; 0xffff mask).
+(define_insn ""
+  [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+	(zero_extend:SI
+	 (subreg:HI
+	  (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+		       (match_operand:SI 2 "const_int_operand" "i")) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (65535))"
+  "{rlinm|rlwinm} %0,%1,%s2,0xffff")
+
+(define_insn ""
+  [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:HI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			(match_operand:SI 2 "const_int_operand" "i,i")) 0))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 3 "=r,r"))]
+  "includes_rshift_p (operands[2], GEN_INT (65535))"
+  "@
+   {rlinm.|rlwinm.} %3,%1,%s2,0xffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:HI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			(match_operand:SI 2 "const_int_operand" "")) 0))
+	 (const_int 0)))
+   (clobber (match_scratch:SI 3 ""))]
+  "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+  [(set (match_dup 3)
+	(zero_extend:SI (subreg:HI
+	   (lshiftrt:SI (match_dup 1)
+			(match_dup 2)) 0)))
+   (set (match_dup 0)
+	(compare:CC (match_dup 3)
+		    (const_int 0)))]
+  "")
+
+(define_insn ""
+  [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:HI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+			(match_operand:SI 2 "const_int_operand" "i,i")) 0))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (65535))"
+  "@
+   {rlinm.|rlwinm.} %0,%1,%s2,0xffff
+   #"
+  [(set_attr "type" "delayed_compare")
+   (set_attr "length" "4,8")])
+
+(define_split
+  [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+	(compare:CC
+	 (zero_extend:SI
+	  (subreg:HI
+	   (lshiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+			(match_operand:SI 2 "const_int_operand" "")) 0))
+	 (const_int 0)))
+   (set (match_operand:SI 0 "gpc_reg_operand" "")
+	(zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))]
+  "includes_rshift_p (operands[2], GEN_INT (65535)) && reload_completed"
+  [(set (match_dup 0)
+	(zero_extend:SI (subreg:HI (lshiftrt:SI (match_dup 1) (match_dup 2)) 0)))
+   (set (match_dup 3)
+	(compare:CC (match_dup 0)
+		    (const_int 0)))]
+  "")
+
+;; POWER-only rrib patterns: insert one bit into operand 0 at a variable
+;; position (operand 1).  The three forms differ only in how RTL expresses
+;; "the top bit of operand 2": arithmetic shift right by 31, logical shift
+;; right by 31, or a one-bit zero_extract at position 0 — all select the
+;; sign bit, which is what rrib inserts.
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+			 (const_int 1)
+			 (match_operand:SI 1 "gpc_reg_operand" "r"))
+	(ashiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+		     (const_int 31)))]
+  "TARGET_POWER"
+  "rrib %0,%1,%2")
+
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+			 (const_int 1)
+			 (match_operand:SI 1 "gpc_reg_operand" "r"))
+	(lshiftrt:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+		     (const_int 31)))]
+  "TARGET_POWER"
+  "rrib %0,%1,%2")
+
+(define_insn ""
+  [(set (zero_extract:SI (match_operand:SI 0 "gpc_reg_operand" "+r")
+			 (const_int 1)
+			 (match_operand:SI 1 "gpc_reg_operand" "r"))
+	(zero_extract:SI (match_operand:SI 2 "gpc_reg_operand" "r")
+			 (const_int 1)
+			 (const_int 0)))]
+  "TARGET_POWER"
+  "rrib %0,%1,%2")
+
+;; 32-bit arithmetic right shift.  The expander dispatches to the
+;; POWER or PowerPC pattern; the POWER variant needs an MQ ("q")
+;; scratch when the shift count is in a register.
+(define_expand "ashrsi3"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ ""
+ "
+{
+ if (TARGET_POWER)
+ emit_insn (gen_ashrsi3_power (operands[0], operands[1], operands[2]));
+ else
+ emit_insn (gen_ashrsi3_no_power (operands[0], operands[1], operands[2]));
+ DONE;
+}")
+
+;; POWER form: srea for a register count (clobbers MQ), srai/srawi
+;; for an immediate count (no clobber, scratch constrained to "X").
+(define_insn "ashrsi3_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i")))
+ (clobber (match_scratch:SI 3 "=q,X"))]
+ "TARGET_POWER"
+ "@
+ srea %0,%1,%2
+ {srai|srawi} %0,%1,%h2")
+
+;; PowerPC form; %I2 selects the immediate variant (srawi) when the
+;; count is a constant, sraw otherwise.
+(define_insn "ashrsi3_no_power"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "! TARGET_POWER"
+ "{sra|sraw}%I2 %0,%1,%h2")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r,r,r"))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %3,%1,%2
+ {srai.|srawi.} %3,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "! TARGET_POWER"
+ "@
+ {sra|sraw}%I2. %3,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,i,r,i"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 "=q,X,q,X"))]
+ "TARGET_POWER"
+ "@
+ srea. %0,%1,%2
+ {srai.|srawi.} %0,%1,%h2
+ #
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,4,8,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER"
+ "@
+ {sra|sraw}%I2. %0,%1,%h2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Floating-point insns, excluding normal data motion.
+;;
+;; PowerPC has a full set of single-precision floating point instructions.
+;;
+;; For the POWER architecture, we pretend that we have both SFmode and
+;; DFmode insns, while, in fact, all fp insns are actually done in double.
+;; The only conversions we will do will be when storing to memory. In that
+;; case, we will use the "frsp" instruction before storing.
+;;
+;; Note that when we store into a single-precision memory location, we need to
+;; use the frsp insn first. If the register being stored isn't dead, we
+;; need a scratch register for the frsp. But this is difficult when the store
+;; is done by reload. It is not incorrect to do the frsp on the register in
+;; this case; we just lose precision that we would have otherwise gotten but
+;; is not guaranteed. Perhaps this should be tightened up at some point.
+
+;; SF -> DF extension.  Per the block comment above, values are kept
+;; in the FP registers in double format, so this is at most a
+;; register copy: emit nothing for a self-move, fmr otherwise.
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float_extend:DF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]))
+ return \"\";
+ else
+ return \"fmr %0,%1\";
+}"
+ [(set_attr "type" "fp")])
+
+;; DF -> SF truncation: round the register to single precision.
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+;; Round-to-single helper for POWER (no single-precision arithmetic
+;; insns); the unspec keeps it from being simplified away.
+(define_insn "aux_truncdfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "gpc_reg_operand" "f")] 0))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "frsp %0,%1"
+ [(set_attr "type" "fp")])
+
+;; SF negation.
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+;; SF absolute value.
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+;; Negated absolute value, matching combine's (neg (abs ...)).
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (abs:SF (match_operand:SF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+;; SF add/sub/mul/div.  Each expander only gates on hard float; the
+;; paired insns pick the PowerPC single-precision instruction or the
+;; POWER instruction (which, per the comment above, computes in
+;; double precision).
+(define_expand "addsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "fadds %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "subsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "fsubs %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_expand "mulsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "fmuls %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+;; POWER multiply is scheduled as a double-precision multiply.
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+(define_expand "divsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "fdivs %0,%1,%2"
+ [(set_attr "type" "sdiv")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (div:SF (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+;; SF fused multiply-add family, matched from combine output and
+;; gated on TARGET_FUSED_MADD.  PowerPC single-precision form:
+;; a*b + c.
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "fmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; POWER form of a*b + c (double-precision hardware).
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; a*b - c.
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "fmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; -(a*b + c).
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "fnmadds %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (plus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; -(a*b - c).
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "fnmsubs %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (neg:SF (minus:SF (mult:SF (match_operand:SF 1 "gpc_reg_operand" "%f")
+ (match_operand:SF 2 "gpc_reg_operand" "f"))
+ (match_operand:SF 3 "gpc_reg_operand" "f"))))]
+ "! TARGET_POWERPC && TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; SF square root: available on PowerPC with the general-purpose
+;; optional group (fsqrts) or on POWER2 (fsqrt, double precision).
+(define_expand "sqrtsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT"
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GPOPT && TARGET_HARD_FLOAT"
+ "fsqrts %0,%1"
+ [(set_attr "type" "ssqrt")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWER2 && TARGET_HARD_FLOAT"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+;; For MIN, MAX, and conditional move, we use DEFINE_EXPAND's that involve a
+;; fsel instruction and some auxiliary computations. Then we just have a
+;; single DEFINE_INSN for fsel and the define_splits to make them if made by
+;; combine.
+;; SF max: expressed as if_then_else for combine, but always expanded
+;; through rs6000_emit_minmax (which uses fsel; hence the GFXOPT gate).
+(define_expand "maxsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+;; SF min: same as above with the arms swapped / SMIN.
+(define_expand "minsf3"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+;; If combine builds an smin/smax rtx, split it back through
+;; rs6000_emit_minmax.
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operator:SF 3 "min_max_operator"
+ [(match_operand:SF 1 "gpc_reg_operand" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+;; SF conditional move; rs6000_emit_cmove decides whether the
+;; comparison can be turned into an fsel sequence, else FAIL falls
+;; back to the generic branch expansion.
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "gpc_reg_operand" "")
+ (match_operand:SF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; fsel: choose operand 2 when operand 1 >= 0.0, else operand 3.
+;; SF comparison selecting between SF values.
+(define_insn "*fselsfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; DF comparison selecting between SF values.
+(define_insn "*fseldfsf4"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (if_then_else:SF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:SF 2 "gpc_reg_operand" "f")
+ (match_operand:SF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; DF unary and binary arithmetic.  These are single instructions on
+;; both POWER and PowerPC, so no expander/insn pairs are needed; the
+;; {power|powerpc} braces select the assembler mnemonic.
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fneg %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fabs %0,%1"
+ [(set_attr "type" "fp")])
+
+;; Negated absolute value, matching combine's (neg (abs ...)).
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (abs:DF (match_operand:DF 1 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT"
+ "fnabs %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "{fa|fadd} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "{fs|fsub} %0,%1,%2"
+ [(set_attr "type" "fp")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "{fm|fmul} %0,%1,%2"
+ [(set_attr "type" "dmul")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (div:DF (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "{fd|fdiv} %0,%1,%2"
+ [(set_attr "type" "ddiv")])
+
+;; DF fused multiply-add family (combine-matched, gated on
+;; TARGET_FUSED_MADD): a*b + c.
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fma|fmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; a*b - c.
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fms|fmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; -(a*b + c).
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (plus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fnma|fnmadd} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; -(a*b - c).
+(define_insn ""
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (neg:DF (minus:DF (mult:DF (match_operand:DF 1 "gpc_reg_operand" "%f")
+ (match_operand:DF 2 "gpc_reg_operand" "f"))
+ (match_operand:DF 3 "gpc_reg_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_FUSED_MADD"
+ "{fnms|fnmsub} %0,%1,%2,%3"
+ [(set_attr "type" "dmul")])
+
+;; DF square root (PowerPC GPOPT group or POWER2).
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "(TARGET_PPC_GPOPT || TARGET_POWER2) && TARGET_HARD_FLOAT"
+ "fsqrt %0,%1"
+ [(set_attr "type" "dsqrt")])
+
+;; The conditional move instructions allow us to perform max and min
+;; operations even when there is no direct hardware min/max instruction.
+
+;; DF max: expressed as if_then_else for combine, but always expanded
+;; through rs6000_emit_minmax (which uses fsel; hence the GFXOPT gate).
+(define_expand "maxdf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 1)
+ (match_dup 2)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "{ rs6000_emit_minmax (operands[0], SMAX, operands[1], operands[2]); DONE;}")
+
+;; DF min: same with the arms swapped / SMIN.
+(define_expand "mindf3"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" ""))
+ (match_dup 2)
+ (match_dup 1)))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "{ rs6000_emit_minmax (operands[0], SMIN, operands[1], operands[2]); DONE;}")
+
+;; If combine builds an smin/smax rtx, split it back through
+;; rs6000_emit_minmax.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operator:DF 3 "min_max_operator"
+ [(match_operand:DF 1 "gpc_reg_operand" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")]))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ [(const_int 0)]
+ "
+{ rs6000_emit_minmax (operands[0], GET_CODE (operands[3]),
+ operands[1], operands[2]);
+ DONE;
+}")
+
+;; DF conditional move; rs6000_emit_cmove decides whether an fsel
+;; sequence is possible, else FAIL falls back to branches.
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "gpc_reg_operand" "")
+ (match_operand:DF 3 "gpc_reg_operand" "")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "
+{
+ if (rs6000_emit_cmove (operands[0], operands[1], operands[2], operands[3]))
+ DONE;
+ else
+ FAIL;
+}")
+
+;; fsel with a DF comparison selecting between DF values.
+(define_insn "*fseldfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; fsel with an SF comparison selecting between DF values.
+;; Guard with TARGET_HARD_FLOAT like every other fsel/FP pattern here
+;; (cf. *fselsfsf4, *fseldfsf4, *fseldfdf4): fsel is a floating-point
+;; instruction and must not be generated for soft-float targets.
+(define_insn "*fselsfdf4"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (if_then_else:DF (ge (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 4 "zero_fp_constant" "F"))
+ (match_operand:DF 2 "gpc_reg_operand" "f")
+ (match_operand:DF 3 "gpc_reg_operand" "f")))]
+ "TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT"
+ "fsel %0,%1,%2,%3"
+ [(set_attr "type" "fp")])
+
+;; Conversions to and from floating-point.
+
+; For each of these conversions, there is a define_expand, a define_insn
+; with a '#' template, and a define_split (with C code). The idea is
+; to allow constant folding with the template of the define_insn,
+; then to have the insns split later (between sched1 and final).
+
+;; Signed SImode -> DFmode conversion without fcfid.  Classic trick:
+;; flip the integer's sign bit, store it as the low word of a double
+;; whose high word is 0x43300000 (the high half of 2^52), then
+;; subtract the constant 2^52 + 2^31 (= 4503601774854144).
+;; Operands: 2 = high-word constant, 3 = FP bias constant,
+;; 4 = stack slot, 5 = DF temp, 6 = SI temp for the xor.
+(define_expand "floatsidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))
+ (clobber (match_dup 6))])]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "
+{
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, rs6000_float_const (\"4503601774854144\", DFmode));
+ operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ operands[5] = gen_reg_rtx (DFmode);
+ operands[6] = gen_reg_rtx (SImode);
+}")
+
+;; Kept as one insn ("#") until the split below, so the whole
+;; sequence can be scheduled and constant-folded as a unit.
+(define_insn "*floatsidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "memory_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:SI 6 "gpc_reg_operand" "=r"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "#"
+ [(set_attr "length" "24")])
+
+;; Emit the real sequence: xor the sign bit into operand 6, store the
+;; two words of the stack slot (word order fixed up for little
+;; endian), load the double, and subtract the bias constant.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_operand:SI 2 "gpc_reg_operand" ""))
+ (use (match_operand:DF 3 "gpc_reg_operand" ""))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" ""))
+ (clobber (match_operand:SI 6 "gpc_reg_operand" ""))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_operand:SI 2 "gpc_reg_operand" ""))
+ (use (match_operand:DF 3 "gpc_reg_operand" ""))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" ""))
+ (clobber (match_operand:SI 6 "gpc_reg_operand" ""))]
+ "
+{
+ rtx lowword, highword;
+ if (GET_CODE (operands[4]) != MEM)
+ abort();
+ highword = XEXP (operands[4], 0);
+ lowword = plus_constant (highword, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_insn (gen_xorsi3 (operands[6], operands[1],
+ GEN_INT (~ (HOST_WIDE_INT) 0x7fffffff)));
+ emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[6]);
+ emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}")
+
+;; Unsigned SImode -> DFmode conversion, same scheme as floatsidf2
+;; but with no sign-bit flip and bias constant 2^52
+;; (= 4503599627370496), so no SI temporary is needed.
+(define_expand "floatunssidf2"
+ [(parallel [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_dup 2))
+ (use (match_dup 3))
+ (clobber (match_dup 4))
+ (clobber (match_dup 5))])]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "
+{
+ operands[2] = force_reg (SImode, GEN_INT (0x43300000));
+ operands[3] = force_reg (DFmode, rs6000_float_const (\"4503599627370496\", DFmode));
+ operands[4] = assign_stack_temp (DFmode, GET_MODE_SIZE (DFmode), 0);
+ operands[5] = gen_reg_rtx (DFmode);
+}")
+
+;; Single "#" insn until the split below.
+(define_insn "*floatunssidf2_internal"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=&f")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (use (match_operand:SI 2 "gpc_reg_operand" "r"))
+ (use (match_operand:DF 3 "gpc_reg_operand" "f"))
+ (clobber (match_operand:DF 4 "memory_operand" "=o"))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" "=f"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "#"
+ [(set_attr "length" "20")])
+
+;; Store the integer and the 0x43300000 high word into the stack
+;; slot (word order fixed for little endian), load the double, and
+;; subtract 2^52.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_operand:SI 2 "gpc_reg_operand" ""))
+ (use (match_operand:DF 3 "gpc_reg_operand" ""))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" ""))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (unsigned_float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (use (match_operand:SI 2 "gpc_reg_operand" ""))
+ (use (match_operand:DF 3 "gpc_reg_operand" ""))
+ (clobber (match_operand:DF 4 "offsettable_mem_operand" ""))
+ (clobber (match_operand:DF 5 "gpc_reg_operand" ""))]
+ "
+{
+ rtx lowword, highword;
+ if (GET_CODE (operands[4]) != MEM)
+ abort();
+ highword = XEXP (operands[4], 0);
+ lowword = plus_constant (highword, 4);
+ if (! WORDS_BIG_ENDIAN)
+ {
+ rtx tmp;
+ tmp = highword; highword = lowword; lowword = tmp;
+ }
+
+ emit_move_insn (gen_rtx_MEM (SImode, lowword), operands[1]);
+ emit_move_insn (gen_rtx_MEM (SImode, highword), operands[2]);
+ emit_move_insn (operands[5], operands[4]);
+ emit_insn (gen_subdf3 (operands[0], operands[5], operands[3]));
+ DONE;
+}")
+
+;; DFmode -> signed SImode conversion: fctiwz into a DImode FP
+;; register (operand 2), store it to a stack slot (operand 3), then
+;; load the low word into the GPR result.
+(define_expand "fix_truncdfsi2"
+ [(parallel [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
+ (clobber (match_dup 2))
+ (clobber (match_dup 3))])]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT"
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = assign_stack_temp (DImode, GET_MODE_SIZE (DImode), 0);
+}")
+
+;; Single "#" insn until the split below.
+(define_insn "*fix_truncdfsi2_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "f")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" "=f"))
+ (clobber (match_operand:DI 3 "memory_operand" "=o"))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT"
+ "#"
+ [(set_attr "length" "16")])
+
+;; Emit the sequence; on big endian the integer result is in the
+;; second (low-addressed + 4) word of the stored doubleword.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" ""))
+ (clobber (match_operand:DI 3 "offsettable_mem_operand" ""))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_operand:DF 1 "gpc_reg_operand" "")))
+ (clobber (match_operand:DI 2 "gpc_reg_operand" ""))
+ (clobber (match_operand:DI 3 "offsettable_mem_operand" ""))]
+ "
+{
+ rtx lowword;
+ if (GET_CODE (operands[3]) != MEM)
+ abort();
+ lowword = XEXP (operands[3], 0);
+ if (WORDS_BIG_ENDIAN)
+ lowword = plus_constant (lowword, 4);
+
+ emit_insn (gen_fctiwz (operands[2], operands[1]));
+ emit_move_insn (operands[3], operands[2]);
+ emit_move_insn (operands[0], gen_rtx_MEM (SImode, lowword));
+ DONE;
+}")
+
+; Here, we use (set (reg) (unspec:DI [(fix:SI ...)] 10))
+; rather than (set (subreg:SI (reg)) (fix:SI ...))
+; because the first makes it clear that operand 0 is not live
+; before the instruction.
+(define_insn "fctiwz"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (unspec:DI [(fix:SI (match_operand:DF 1 "gpc_reg_operand" "f"))] 10))]
+ "(TARGET_POWER2 || TARGET_POWERPC) && TARGET_HARD_FLOAT"
+ "{fcirz|fctiwz} %0,%1"
+ [(set_attr "type" "fp")])
+
+;; 64-bit PowerPC only: DImode <-> DFmode conversions are single
+;; instructions (fcfid / fctidz) operating on FP registers.
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float:DF (match_operand:DI 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "fcfid %0,%1"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=f")
+ (fix:DI (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT"
+ "fctidz %0,%1"
+ [(set_attr "type" "fp")])
+
+;; Define the DImode operations that can be done in a small number
+;; of instructions. The & constraints are to prevent the register
+;; allocator from allocating registers that overlap with the inputs
+;; (for example, having an input in 7,8 and an output in 6,7). We
+;; also allow for the output being the same as one of the inputs.
+
+;; 64-bit add on a 32-bit target: addc/adde (or addic/add?e for an
+;; immediate), with the word order of the carry chain depending on
+;; endianness.  "&" alternatives keep the output off the inputs; the
+;; "0" alternatives allow output == input 1.
+(define_insn "*adddi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,0,0")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %L0,%L1,%L2\;{ae|adde} %0,%1,%2\"
+ : \"{ai|addic} %L0,%L1,%2\;{a%G2e|add%G2e} %0,%1\";
+ else
+ return (GET_CODE (operands[2])) != CONST_INT
+ ? \"{a|addc} %0,%1,%2\;{ae|adde} %L0,%L1,%L2\"
+ : \"{ai|addic} %0,%1,%2\;{a%G2e|add%G2e} %L0,%L1\";
+}"
+ [(set_attr "length" "8")])
+
+;; 64-bit subtract on a 32-bit target: subfc/subfe (or subfic for an
+;; immediate minuend).
+(define_insn "*subdi3_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,&r,r,r,r")
+ (minus:DI (match_operand:DI 1 "reg_or_short_operand" "r,I,0,r,I")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r,r,0,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (WORDS_BIG_ENDIAN)
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %L0,%L2,%L1\;{sfe|subfe} %0,%2,%1\"
+ : \"{sfi|subfic} %L0,%L2,%1\;{sf%G1e|subf%G1e} %0,%2\";
+ else
+ return (GET_CODE (operands[1]) != CONST_INT)
+ ? \"{sf|subfc} %0,%2,%1\;{sfe|subfe} %L0,%L2,%L1\"
+ : \"{sfi|subfic} %0,%2,%1\;{sf%G1e|subf%G1e} %L0,%L2\";
+}"
+ [(set_attr "length" "8")])
+
+;; 64-bit negate on a 32-bit target: subfic from 0 plus subfze.
+(define_insn "*negdi2_noppc64"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"{sfi|subfic} %L0,%L1,0\;{sfze|subfze} %0,%1\"
+ : \"{sfi|subfic} %0,%1,0\;{sfze|subfze} %L0,%L1\";
+}"
+ [(set_attr "length" "8")])
+
+;; Widening signed 32x32->64 multiply.  Three strategies:
+;;  - neither POWER nor PowerPC (old RIOS subset): call the __mull
+;;    millicode routine with args in r3/r4 and the result in r3:r4;
+;;  - POWER: use the MQ-register form (mulsidi3_mq) below;
+;;  - otherwise fall through to the generic PowerPC pattern.
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "! TARGET_POWERPC64"
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mull_call ());
+ if (WORDS_BIG_ENDIAN)
+ emit_move_insn (operands[0], gen_rtx_REG (DImode, 3));
+ else
+ {
+ /* Little-endian word order: move the r3:r4 halves separately so
+ each SI subword of the DI result lands in the right place.  */
+ emit_move_insn (operand_subword (operands[0], 0, 0, DImode),
+ gen_rtx_REG (SImode, 3));
+ emit_move_insn (operand_subword (operands[0], 1, 0, DImode),
+ gen_rtx_REG (SImode, 4));
+ }
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_mulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; POWER form of the widening signed multiply: mul leaves the high part
+;; in %0 and the low part in the MQ register, which mfmq copies out.
+;; The MQ clobber is expressed via the "q" scratch.
+(define_insn "mulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2\;mfmq %L0"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+;; PowerPC form: mulhw/mullw pair; order of the two halves follows
+;; WORDS_BIG_ENDIAN.  Early-clobber output so it cannot overlap the
+;; inputs that the second instruction still reads.
+(define_insn "*mulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhw %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhw %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+;; After reload, split the widening signed multiply into independent
+;; high-part (mulhw) and low-part (mullw) SImode sets on the two
+;; subwords of operand 0, chosen according to word endianness.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_dup 1))
+ (sign_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ /* Subword 'endian' is the high half, '1 - endian' the low half.  */
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+;; Widening unsigned 32x32->64 multiply.  PowerPC only (no millicode
+;; fallback here); POWER targets divert to the MQ-register variant.
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsidi3_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; Unsigned widening multiply, POWER variant: same mulhwu/mullw pair as
+;; below but additionally clobbering MQ (the "q" scratch) since these
+;; are POWER CPUs where mullw's cousin touches MQ.
+(define_insn "umulsidi3_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+;; Unsigned widening multiply, plain PowerPC: mulhwu for the high word,
+;; mullw for the low word, ordered by WORDS_BIG_ENDIAN.
+(define_insn "*umulsidi3_no_mq"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" "r"))))]
+ "TARGET_POWERPC && ! TARGET_POWER && ! TARGET_POWERPC64"
+ "*
+{
+ return (WORDS_BIG_ENDIAN)
+ ? \"mulhwu %0,%1,%2\;mullw %L0,%1,%2\"
+ : \"mulhwu %L0,%1,%2\;mullw %0,%1,%2\";
+}"
+ [(set_attr "type" "imul")
+ (set_attr "length" "8")])
+
+;; Post-reload split of the unsigned widening multiply into separate
+;; high-part and low-part SImode multiplies on the subwords of the
+;; destination (mirror of the signed split above).
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "gpc_reg_operand" ""))))]
+ "TARGET_POWERPC && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_dup 1))
+ (zero_extend:DI (match_dup 2)))
+ (const_int 32))))
+ (set (match_dup 4)
+ (mult:SI (match_dup 1)
+ (match_dup 2)))]
+ "
+{
+ /* Subword 'endian' is the high half, '1 - endian' the low half.  */
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ operands[3] = operand_subword (operands[0], endian, 0, DImode);
+ operands[4] = operand_subword (operands[0], 1 - endian, 0, DImode);
+}")
+
+;; High 32 bits of a signed 32x32 multiply.  Strategy selection mirrors
+;; mulsidi3: millicode __mulh call (args r3/r4, result r3) when neither
+;; POWER nor PowerPC, the MQ form for POWER, generic pattern otherwise.
+(define_expand "smulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ ""
+ "
+{
+ if (! TARGET_POWER && ! TARGET_POWERPC)
+ {
+ emit_move_insn (gen_rtx_REG (SImode, 3), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, 4), operands[2]);
+ emit_insn (gen_mulh_call ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, 3));
+ DONE;
+ }
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_smulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; POWER: 'mul' puts the signed high part directly in %0 (low part goes
+;; to MQ, hence the clobbered "q" scratch).
+(define_insn "smulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWER"
+ "mul %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; PowerPC: mulhw computes the signed high part in one instruction.
+(define_insn "*smulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhw %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; High 32 bits of an unsigned 32x32 multiply (PowerPC only; no
+;; millicode fallback).  POWER targets use the MQ variant.
+(define_expand "umulsi3_highpart"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" ""))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "")))
+ (const_int 32))))]
+ "TARGET_POWERPC"
+ "
+{
+ if (TARGET_POWER)
+ {
+ emit_insn (gen_umulsi3_highpart_mq (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; POWER variant of unsigned high-part multiply: mulhwu plus an MQ
+;; clobber (the "q" scratch) for POWER's multiply side effects.
+(define_insn "umulsi3_highpart_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))
+ (clobber (match_scratch:SI 3 "=q"))]
+ "TARGET_POWERPC && TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; Plain PowerPC: single mulhwu.
+(define_insn "*umulsi3_highpart_no_mq"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (const_int 32))))]
+ "TARGET_POWERPC && ! TARGET_POWER"
+ "mulhwu %0,%1,%2"
+ [(set_attr "type" "imul")])
+
+;; If operands 0 and 2 are in the same register, we have a problem. But
+;; operands 0 and 1 (the usual case) can be in the same register. That's
+;; why we have the strange constraints below.
+;; DImode left shift on POWER using the slq/sllq family (which shift a
+;; register pair through MQ).  Alternative 0 handles shift counts >= 32
+;; ("M" constraint): a single slwi of the low word into the high word
+;; plus clearing the low word.  The &r alternative avoids operand 0
+;; overlapping the shift-count register (see comment above).
+(define_insn "ashldi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {sli|slwi} %0,%L1,%h2\;{cal %L0,0(0)|li %L0,0}
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2
+ sl%I2q %L0,%L1,%h2\;sll%I2q %0,%1,%h2"
+ [(set_attr "length" "8")])
+
+;; DImode logical right shift on POWER, mirror image of the above:
+;; counts >= 32 become a single srwi plus clearing the high word,
+;; otherwise the srq/srlq MQ-pair sequence is used.
+(define_insn "lshrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,&r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,0,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "M,i,r,r")))
+ (clobber (match_scratch:SI 3 "=X,q,q,q"))]
+ "TARGET_POWER"
+ "@
+ {s%A2i|s%A2wi} %L0,%1,%h2\;{cal %0,0(0)|li %0,0}
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2
+ sr%I2q %0,%1,%h2\;srl%I2q %L0,%L1,%h2"
+ [(set_attr "length" "8")])
+
+;; Shift by a variable amount is too complex to be worth open-coding. We
+;; just handle shifts by constants.
+;; DImode arithmetic right shift on POWER, constant counts only (see
+;; comment above).  Counts >= 32: srawi of the high word twice (sign
+;; fill + shifted value); smaller counts: sraiq/srliq MQ-pair sequence.
+(define_insn "ashrdi3_power"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "M,i")))
+ (clobber (match_scratch:SI 3 "=X,q"))]
+ "TARGET_POWER"
+ "@
+ {srai|srawi} %0,%1,31\;{srai|srawi} %L0,%1,%h2
+ sraiq %0,%1,%h2\;srliq %L0,%L1,%h2"
+ [(set_attr "length" "8")])
+
+;; PowerPC64 DImode operations.
+
+;; DImode add expander.  32-bit targets: FAIL on constants that don't
+;; fit a signed 16-bit immediate (the library/split paths handle them).
+;; 64-bit targets: split an awkward constant into a high part ('L',
+;; addis-able) plus a sign-adjusted low 16 bits, emitted as two adds.
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_add_cint64_operand" "")))]
+ ""
+ "
+{
+ if (! TARGET_POWERPC64)
+ {
+ if (non_short_cint_operand (operands[2], DImode))
+ FAIL;
+ }
+ else
+ if (GET_CODE (operands[2]) == CONST_INT
+ && ! add_operand (operands[2], DImode))
+ {
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ /* low = sign-extended bottom 16 bits; rest = val - low.  */
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, DImode);
+
+ if (!CONST_OK_FOR_LETTER_P (rest, 'L'))
+ FAIL;
+
+ /* The ordering here is important for the prolog expander.
+ When space is allocated from the stack, adding 'low' first may
+ produce a temporary deallocation (which would be bad). */
+ emit_insn (gen_adddi3 (tmp, operands[1], GEN_INT (rest)));
+ emit_insn (gen_adddi3 (operands[0], tmp, GEN_INT (low)));
+ DONE;
+ }
+}")
+
+;; Discourage ai/addic because of carry but provide it in an alternative
+;; allowing register zero as source.
+
+;; 64-bit add insn.  Alternatives: register add, addi, addic (register
+;; zero allowed as source but discouraged via '?' — see comment above),
+;; and addis for 'L' constants.
+(define_insn "*adddi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,?r,r")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,b,r,b")
+ (match_operand:DI 2 "add_operand" "r,I,I,L")))]
+ "TARGET_POWERPC64"
+ "@
+ add %0,%1,%2
+ addi %0,%1,%2
+ addic %0,%1,%2
+ addis %0,%1,%v2")
+
+;; Add-and-compare-against-zero, result discarded (scratch only).  The
+;; "?y" (non-cr0) alternatives emit '#' and are split below, since the
+;; record forms add./addic. only set cr0.
+(define_insn "*adddi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ add. %3,%1,%2
+ addic. %3,%1,%2
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,4,8,8")])
+
+;; Split the non-cr0 '#' alternatives after reload into a plain add
+;; followed by an explicit compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Add-and-compare where the sum is also kept in operand 0.  Same
+;; cr0-only restriction on the record forms; '#' alternatives split.
+(define_insn "*adddi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "r,I,r,I"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ add. %0,%1,%2
+ addic. %0,%1,%2
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,4,8,8")])
+
+;; Post-reload split of the previous pattern's non-cr0 alternatives:
+;; do the add into operand 0, then compare it.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (plus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split an add that we can't do in one insn into two insns, each of which
+;; does one 16-bit part. This is used by combine. Note that the low-order
+;; add should be last in case the result gets used in an address.
+
+;; Split an add of a constant that doesn't fit one insn into addis +
+;; addi (high part first — see comment above).  If the high part isn't
+;; 'L'-representable either, load the constant into a fresh pseudo
+;; instead, or FAIL when no new pseudos may be created.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "non_add_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
+"
+{
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000);
+ HOST_WIDE_INT rest = trunc_int_for_mode (val - low, DImode);
+
+ operands[4] = GEN_INT (low);
+ if (CONST_OK_FOR_LETTER_P (rest, 'L'))
+ operands[3] = GEN_INT (rest);
+ else if (! no_new_pseudos)
+ {
+ operands[3] = gen_reg_rtx (DImode);
+ emit_move_insn (operands[3], operands[2]);
+ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; DImode bitwise NOT, implemented as nor with both sources the same.
+(define_insn "one_cmpldi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (not:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "nor %0,%1,%1")
+
+;; NOT with a compare against zero, result discarded.  nor. only sets
+;; cr0, so the non-cr0 alternative emits '#' and is split below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ nor. %2,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: NOT into the scratch, then an explicit compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (not:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+;; NOT with compare, keeping the result in operand 0.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (not:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ nor. %0,%1,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping variant's non-cr0 case.
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (not:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (not:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit subtract: subf for register minuend, subfic when operand 1 is
+;; a short immediate.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (minus:DI (match_operand:DI 1 "reg_or_short_operand" "r,I")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")))]
+ "TARGET_POWERPC64"
+ "@
+ subf %0,%2,%1
+ subfic %0,%2,%1")
+
+;; Subtract-and-compare-against-zero, result discarded.  subf. sets
+;; only cr0; the non-cr0 alternative is '#' and split below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ subf. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: subtract into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (minus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Subtract-and-compare, keeping the difference in operand 0.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ subf. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping variant's non-cr0 case.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (minus:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (minus:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; DImode subtract expander: a constant subtrahend is turned into an
+;; add of its negation so the adddi3 machinery handles it.
(define_expand is emitted unchanged below.)
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (minus:DI (match_operand:DI 1 "reg_or_short_operand" "")
+ (match_operand:DI 2 "reg_or_sub_cint64_operand" "")))]
+ ""
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_adddi3 (operands[0], operands[1],
+ negate_rtx (DImode, operands[2])));
+ DONE;
+ }
+}")
+
+;; DImode absolute value via the branch-free mask trick:
+;; mask = x >> 63; abs = (x ^ mask) - mask.
+(define_insn "absdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0")))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "sradi %2,%1,63\;xor %0,%2,%1\;subf %0,%2,%0"
+ [(set_attr "length" "12")])
+
+;; Post-reload split of absdi2 into the same three-insn sequence as
+;; separate RTL insns (enables scheduling).
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 0) (match_dup 2)))]
+ "")
+
+;; Negated absolute value: same mask trick with the final subtract
+;; operands swapped (mask - (x ^ mask)).
+(define_insn "*nabsdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r,r")
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,0"))))
+ (clobber (match_scratch:DI 2 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "sradi %2,%1,63\;xor %0,%2,%1\;subf %0,%0,%2"
+ [(set_attr "length" "12")])
+
+;; Post-reload split of *nabsdi2.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" ""))))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (ashiftrt:DI (match_dup 1) (const_int 63)))
+ (set (match_dup 0) (xor:DI (match_dup 2) (match_dup 1)))
+ (set (match_dup 0) (minus:DI (match_dup 2) (match_dup 0)))]
+ "")
+
+;; DImode negate expander — no special casing; matching is left to the
+;; 64-bit insn below or the 32-bit *negdi2_noppc64 pattern.
+(define_expand "negdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (neg:DI (match_operand:DI 1 "gpc_reg_operand" "")))]
+ ""
+ "")
+
+;; 64-bit negate: single neg instruction.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "neg %0,%1")
+
+;; Negate-and-compare-against-zero, result discarded; neg. sets only
+;; cr0, so the non-cr0 alternative is '#' and split below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ neg. %2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: negate into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2)
+ (neg:DI (match_dup 1)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 2)
+ (const_int 0)))]
+ "")
+
+;; Negate-and-compare, keeping the result in operand 0.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_POWERPC64"
+ "@
+ neg. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping variant's non-cr0 case.
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (neg:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (neg:DI (match_dup 1)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (neg:DI (match_dup 1)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; find-first-set for DImode: isolate the lowest set bit with
+;; (-x & x), count leading zeros, and map to the 1-based bit index
+;; via 64 - clz.  Yields 0 for a zero input (cntlzd 0 = 64).
+(define_insn "ffsdi2"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=&r")
+ (ffs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "neg %0,%1\;and %0,%0,%1\;cntlzd %0,%0\;subfic %0,%0,64"
+ [(set_attr "length" "16")])
+
+;; 64-bit multiply, low 64 bits.
+(define_insn "muldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (mult:DI (match_operand:DI 1 "gpc_reg_operand" "%r")
+ (match_operand:DI 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "mulld %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+;; High 64 bits of a signed 64x64 multiply (modeled as a 128-bit
+;; multiply shifted right 64): single mulhd.
+(define_insn "smuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (sign_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (sign_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhd %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+;; Unsigned counterpart: mulhdu.
+(define_insn "umuldi3_highpart"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "gpc_reg_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (const_int 64))))]
+ "TARGET_POWERPC64"
+ "mulhdu %0,%1,%2"
+ [(set_attr "type" "lmul")])
+
+;; DImode signed divide expander: keep positive power-of-2 constant
+;; divisors (matched by the sradi/addze pattern below), force anything
+;; else into a register for divd.
+(define_expand "divdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) > 0
+ && exact_log2 (INTVAL (operands[2])) >= 0)
+ ;
+ else
+ operands[2] = force_reg (DImode, operands[2]);
+}")
+
+;; DImode signed modulo: only open-coded for positive power-of-2
+;; divisors, as n - ((n / 2^i) << i); everything else FAILs to the
+;; library.
+(define_expand "moddi3"
+ [(use (match_operand:DI 0 "gpc_reg_operand" ""))
+ (use (match_operand:DI 1 "gpc_reg_operand" ""))
+ (use (match_operand:DI 2 "reg_or_cint_operand" ""))]
+ "TARGET_POWERPC64"
+ "
+{
+ int i;
+ rtx temp1;
+ rtx temp2;
+
+ if (GET_CODE (operands[2]) != CONST_INT
+ || INTVAL (operands[2]) <= 0
+ || (i = exact_log2 (INTVAL (operands[2]))) < 0)
+ FAIL;
+
+ temp1 = gen_reg_rtx (DImode);
+ temp2 = gen_reg_rtx (DImode);
+
+ emit_insn (gen_divdi3 (temp1, operands[1], operands[2]));
+ emit_insn (gen_ashldi3 (temp2, temp1, GEN_INT (i)));
+ emit_insn (gen_subdi3 (operands[0], operands[1], temp2));
+ DONE;
+}")
+
+;; Signed divide by a power of 2: sradi (which sets the carry for
+;; inexact negative results) followed by addze to round toward zero.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (div:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "exact_log2_cint_operand" "N")))]
+ "TARGET_POWERPC64"
+ "sradi %0,%1,%p2\;addze %0,%0"
+ [(set_attr "length" "8")])
+
+;; Same division with a compare against zero, result discarded; the
+;; record form addze. sets only cr0, so the non-cr0 case splits below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ sradi %3,%1,%p2\;addze. %3,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split: divide into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "exact_log2_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (div:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Divide-and-compare, keeping the quotient in operand 0.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "exact_log2_cint_operand" "N,N"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (div:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ sradi %0,%1,%p2\;addze. %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split of the result-keeping variant's non-cr0 case.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (div:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "exact_log2_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (div:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (div:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; General signed 64-bit divide (non-power-of-2 divisors, forced into a
+;; register by the divdi3 expander).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (div:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "divd %0,%1,%2"
+ [(set_attr "type" "ldiv")])
+
+;; Unsigned 64-bit divide.
+(define_insn "udivdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (udiv:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "gpc_reg_operand" "r")))]
+ "TARGET_POWERPC64"
+ "divdu %0,%1,%2"
+ [(set_attr "type" "ldiv")])
+
+;; DImode rotate left: rldcl (register count) or rldicl (immediate,
+;; selected by %I2) with a no-op mask starting at bit 0.
+(define_insn "rotldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,0")
+
+;; Rotate with compare against zero, result discarded.  The record form
+;; sets only cr0; the '?y' alternative emits '#' and splits below.
+(define_insn "*rotldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %3,%1,%H2,0
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: rotate into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Rotate with compare, keeping the result in operand 0.
+(define_insn "*rotldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %0,%1,%H2,0
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping rotate-and-compare.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (rotate:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (rotate:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate combined with an AND against a mask64_operand: folds the
+;; mask into the rotate instruction's mask field (%S3 / %B3).
+(define_insn "*rotldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri"))
+ (match_operand:DI 3 "mask64_operand" "S")))]
+ "TARGET_POWERPC64"
+ "rld%I2c%B3 %0,%1,%H2,%S3")
+
+;; Rotate-and-mask with compare against zero, result discarded; record
+;; form restricted to cr0, non-cr0 case split below.
+(define_insn "*rotldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:DI 3 "mask64_operand" "S,S"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2c%B3. %4,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: rotate-and-mask into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (and:DI (rotate:DI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Rotate-and-mask with compare, keeping the result in operand 0.
+(define_insn "*rotldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri"))
+ (match_operand:DI 3 "mask64_operand" "S,S"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2c%B3. %0,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping rotate-and-mask compare.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate whose low byte is zero-extended: rldicl with mask begin 56
+;; keeps only the low 8 bits of the rotated value.
+(define_insn "*rotldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,56")
+
+;; QI-extended rotate with compare against zero, result discarded;
+;; non-cr0 case split below.
+(define_insn "*rotldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %3,%1,%H2,56
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: extended rotate into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:QI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; QI-extended rotate with compare, keeping the result in operand 0.
+(define_insn "*rotldi3_internal9"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %0,%1,%H2,56
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping QI-extended rotate compare.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:QI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:QI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Rotate whose low halfword is zero-extended: rldicl with mask begin
+;; 48 keeps only the low 16 bits of the rotated value.
+(define_insn "*rotldi3_internal10"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,48")
+
+;; HI-extended rotate with compare against zero, result discarded;
+;; non-cr0 case split below.
+(define_insn "*rotldi3_internal11"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %3,%1,%H2,48
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split: extended rotate into the scratch, then compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:HI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; HI-extended rotate with compare, keeping the result in operand 0.
+(define_insn "*rotldi3_internal12"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %0,%1,%H2,48
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Post-reload split of the result-keeping HI-extended rotate compare.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:HI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:HI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal13"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri")) 0)))]
+ "TARGET_POWERPC64"
+ "rld%I2cl %0,%1,%H2,32")
+
+(define_insn "*rotldi3_internal14"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %3,%1,%H2,32
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (zero_extend:DI (subreg:SI
+ (rotate:DI (match_dup 1)
+ (match_dup 2)) 0)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+(define_insn "*rotldi3_internal15"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "ri,ri")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64"
+ "@
+ rld%I2cl. %0,%1,%H2,32
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (zero_extend:DI
+ (subreg:SI
+ (rotate:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" "")) 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (zero_extend:DI (subreg:SI (rotate:DI (match_dup 1) (match_dup 2)) 0)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit shift left. PowerPC64 matches the insn patterns directly; on
+;; 32-bit POWER the shift is open-coded by gen_ashldi3_power; otherwise
+;; the expander FAILs and the middle end falls back to a library call.
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_ashldi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; Plain 64-bit shift left: a single sld (register count) or sldi
+;; (immediate count) instruction. The bogus (set_attr "length" "8") has
+;; been removed: this alternative always emits exactly one 4-byte insn,
+;; matching the default length used by the analogous *lshrdi3_internal1
+;; and *ashrdi3_internal1 patterns, so branch shortening need not
+;; over-estimate its size.
+(define_insn "*ashldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "sld%I2 %0,%1,%H2")
+
+;; Record form of the 64-bit shift left; the ?y alternative is split
+;; after reload, below.
+(define_insn "*ashldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ sld%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashldi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *ashldi3_internal2, but the shifted value is also stored.
+(define_insn "*ashldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ sld%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashldi3_internal3 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashift:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashift:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Shift left combined with an AND whose mask rldic can generate (checked
+;; by includes_rldic_lshift_p): one rotate-left-then-clear instruction.
+(define_insn "*ashldi3_internal4"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "const_int_operand" "n")))]
+ "TARGET_POWERPC64 && includes_rldic_lshift_p (operands[2], operands[3])"
+ "rldic %0,%1,%H2,%W3")
+
+;; Record form of *ashldi3_internal4; the ?y alternative is split below.
+(define_insn "ashldi3_internal5"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_POWERPC64 && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %4,%1,%H2,%W3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for ashldi3_internal5 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; As ashldi3_internal5, but the masked result is also stored.
+(define_insn "*ashldi3_internal6"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && includes_rldic_lshift_p (operands[2], operands[3])"
+ "@
+ rldic. %0,%1,%H2,%W3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashldi3_internal6 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldic_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Shift left combined with an AND whose mask rldicr can generate
+;; (rotate-left-then-clear-right, checked by includes_rldicr_lshift_p).
+(define_insn "*ashldi3_internal7"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "const_int_operand" "i"))
+ (match_operand:DI 3 "mask64_operand" "S")))]
+ "TARGET_POWERPC64 && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "rldicr %0,%1,%H2,%S3")
+
+;; Record form of *ashldi3_internal7; the ?y alternative is split below.
+(define_insn "ashldi3_internal8"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "S,S"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=r,r"))]
+ "TARGET_POWERPC64 && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %4,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for ashldi3_internal8 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 4)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; As ashldi3_internal8, but the masked result is also stored.
+(define_insn "*ashldi3_internal9"
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:DI 3 "mask64_operand" "S,S"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && includes_rldicr_lshift_p (operands[2], operands[3])"
+ "@
+ rldicr. %0,%1,%H2,%S3
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashldi3_internal9 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:DI (ashift:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "const_int_operand" ""))
+ (match_operand:DI 3 "mask64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2)) (match_dup 3)))]
+ "TARGET_POWERPC64 && reload_completed
+ && includes_rldicr_lshift_p (operands[2], operands[3])"
+ [(set (match_dup 0)
+ (and:DI (ashift:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit logical shift right: srd/srdi on PowerPC64, open-coded on
+;; POWER via gen_lshrdi3_power, otherwise a libcall (FAIL).
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER)
+ {
+ emit_insn (gen_lshrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; Plain 64-bit logical shift right (one srd or srdi instruction).
+(define_insn "*lshrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "srd%I2 %0,%1,%H2")
+
+;; Record form; the ?y alternative is split after reload, below.
+(define_insn "*lshrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ srd%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *lshrdi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *lshrdi3_internal2, but the shifted value is also stored.
+(define_insn "*lshrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ srd%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *lshrdi3_internal3 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (lshiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (lshiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit arithmetic shift right. Unlike ashldi3/lshrdi3, the POWER path
+;; is taken only for constant shift counts (gen_ashrdi3_power handles
+;; just CONST_INT here); variable counts fall through to FAIL.
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" "")))]
+ "TARGET_POWERPC64 || TARGET_POWER"
+ "
+{
+ if (TARGET_POWERPC64)
+ ;
+ else if (TARGET_POWER && GET_CODE (operands[2]) == CONST_INT)
+ {
+ emit_insn (gen_ashrdi3_power (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ else
+ FAIL;
+}")
+
+;; Plain 64-bit arithmetic shift right (one srad or sradi instruction).
+(define_insn "*ashrdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri")))]
+ "TARGET_POWERPC64"
+ "srad%I2 %0,%1,%H2")
+
+;; Record form; the ?y alternative is split after reload, below.
+(define_insn "*ashrdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ srad%I2. %3,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashrdi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *ashrdi3_internal2, but the shifted value is also stored.
+(define_insn "*ashrdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "ri,ri"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ srad%I2. %0,%1,%H2
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *ashrdi3_internal3 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (ashiftrt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ashiftrt:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit AND. Alternatives: register AND; a rldicl/rldicr-style mask
+;; via rldic%B2; andi./andis. for 16-bit immediates. The last two always
+;; set CR0 as a side effect, hence the CC clobber in those alternatives.
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r")
+ (match_operand:DI 2 "and64_operand" "?r,S,K,J")))
+ (clobber (match_scratch:CC 3 "=X,X,x,x"))]
+ "TARGET_POWERPC64"
+ "@
+ and %0,%1,%2
+ rldic%B2 %0,%1,0,%S2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2")
+
+;; Record form of anddi3; the ?y/??y alternatives (compare into a CR
+;; field other than CR0) are split after reload. Fixed operand 1's
+;; constraint string, which had a trailing empty eighth alternative
+;; ("%r,r,r,r,r,r,r,") -- every other operand here, and the sibling
+;; *anddi3_internal3, has eight "r" alternatives.
+(define_insn "*anddi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,?y,?y,??y,??y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_operand" "r,S,K,J,r,S,K,J"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r,r,r,r,r,r,r"))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,x,x"))]
+ "TARGET_POWERPC64"
+ "@
+ and. %3,%1,%2
+ rldic%B2. %3,%1,0,%S2
+ andi. %3,%1,%b2
+ andis. %3,%1,%u2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,delayed_compare,compare,compare,compare,delayed_compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+;; Split for *anddi3_internal2 when the compare cannot use CR0; the
+;; re-emitted AND keeps its CC clobber (needed for the andi./andis.
+;; alternatives of anddi3).
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "and64_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (and:DI (match_dup 1)
+ (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *anddi3_internal2, but the AND result is also stored.
+(define_insn "*anddi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,x,x,?y,?y,??y,??y")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "and64_operand" "r,S,K,J,r,S,K,J"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 "=X,X,X,X,X,X,x,x"))]
+ "TARGET_POWERPC64"
+ "@
+ and. %0,%1,%2
+ rldic%B2. %0,%1,0,%S2
+ andi. %0,%1,%b2
+ andis. %0,%1,%u2
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare,delayed_compare,compare,compare,compare,delayed_compare,compare,compare")
+ (set_attr "length" "4,4,4,4,8,8,8,8")])
+
+;; Split for *anddi3_internal3 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC (and:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "and64_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:CC 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 4))])
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit OR. Constants that a single ori/oris cannot encode are done in
+;; two steps: first the high part (low 16 bits masked off), then the low
+;; 16 bits. The intermediate goes into a fresh pseudo unless reload has
+;; started (no_new_pseudos) or dest == src.
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (ior:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ /* CONST_DOUBLE case: the 64-bit value lives in the low word when
+ HOST_WIDE_INT is only 32 bits wide. */
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_iordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_iordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+;; 64-bit XOR; same two-step strategy as iordi3 for wide constants.
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (xor:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_logical_cint_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ if (non_logical_cint_operand (operands[2], DImode))
+ {
+ HOST_WIDE_INT value;
+ rtx tmp = ((no_new_pseudos || rtx_equal_p (operands[0], operands[1]))
+ ? operands[0] : gen_reg_rtx (DImode));
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ value = INTVAL (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ GEN_INT (value & (~ (HOST_WIDE_INT) 0xffff))));
+ }
+ else
+ {
+ value = CONST_DOUBLE_LOW (operands[2]);
+ emit_insn (gen_xordi3 (tmp, operands[1],
+ immed_double_const (value
+ & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode)));
+ }
+
+ emit_insn (gen_xordi3 (operands[0], tmp, GEN_INT (value & 0xffff)));
+ DONE;
+ }
+}")
+
+;; Generic DImode boolean (or/xor family) insn; %q3 prints the mnemonic
+;; for the operator, with i/is suffixes for the immediate forms.
+(define_insn "*booldi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r,r")
+ (match_operand:DI 2 "logical_operand" "r,K,JF")]))]
+ "TARGET_POWERPC64"
+ "@
+ %q3 %0,%1,%2
+ %q3i %0,%1,%b2
+ %q3is %0,%1,%u2")
+
+;; Record form; the ?y alternative is split after reload, below.
+(define_insn "*booldi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *booldi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *booldi3_internal2, but the boolean result is also stored.
+(define_insn "*booldi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "%r,r")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *booldi3_internal3 (any CC reg) after reload.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Split a logical operation that we can't do in one insn into two insns,
+;; each of which does one 16-bit part. This is used by combine.
+
+;; Combine-time split: apply the operator first with the constant's high
+;; part (low 16 bits masked off), then with its low 16 bits, reusing the
+;; destination as the intermediate.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operator:DI 3 "boolean_or_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "non_logical_cint_operand" "")]))]
+ "TARGET_POWERPC64"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 0) (match_dup 5))]
+"
+{
+ rtx i3,i4;
+
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ {
+ HOST_WIDE_INT value = CONST_DOUBLE_LOW (operands[2]);
+ i3 = immed_double_const (value & (~ (HOST_WIDE_INT) 0xffff),
+ 0, DImode);
+ i4 = GEN_INT (value & 0xffff);
+ }
+ else
+ {
+ i3 = GEN_INT (INTVAL (operands[2])
+ & (~ (HOST_WIDE_INT) 0xffff));
+ i4 = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ }
+ /* Build the two replacement operations with the same operator code.  */
+ operands[4] = gen_rtx (GET_CODE (operands[3]), DImode,
+ operands[1], i3);
+ operands[5] = gen_rtx (GET_CODE (operands[3]), DImode,
+ operands[0], i4);
+}")
+
+;; Boolean with one complemented operand (andc/orc style); note the
+;; operand order in the output: %2 first, then the complemented %1.
+(define_insn "*boolcdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r")]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%2,%1")
+
+;; Record form; the ?y alternative is split after reload, below.
+(define_insn "*boolcdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %3,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *boolcdi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *boolcdi3_internal2, but the result is also stored.
+(define_insn "*boolcdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %0,%2,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *boolcdi3_internal3 (any CC reg) after reload.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (match_operand:DI 2 "gpc_reg_operand" "")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Boolean with both operands complemented (nand/nor style, by De
+;; Morgan); %q3 picks the complementary mnemonic.
+(define_insn "*boolccdi3_internal1"
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 3 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r"))]))]
+ "TARGET_POWERPC64"
+ "%q3 %0,%1,%2")
+
+;; Record form; the ?y alternative is split after reload, below.
+(define_insn "*boolccdi3_internal2"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %3,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *boolccdi3_internal2 when the compare cannot use CR0.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; As *boolccdi3_internal2, but the result is also stored.
+(define_insn "*boolccdi3_internal3"
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r"))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" "r,r"))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (match_dup 4))]
+ "TARGET_POWERPC64"
+ "@
+ %q4. %0,%1,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+;; Split for *boolccdi3_internal3 (any CC reg) after reload.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operator:DI 4 "boolean_operator"
+ [(not:DI (match_operand:DI 1 "gpc_reg_operand" ""))
+ (not:DI (match_operand:DI 2 "gpc_reg_operand" ""))])
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_dup 4))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 4))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Now define ways of moving data around.
+
+;; Elf specific ways of loading addresses for non-PIC code.
+;; The output of this could be r0, but we make a very strong
+;; preference for a base register because it will usually
+;; be needed there.
+;; Load the high part of a symbolic address (lis) for 32-bit ELF.
+(define_insn "elf_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "{liu|lis} %0,%1@ha")
+
+;; Add the low part of a symbolic address; la when the base is a base
+;; register (b), addic otherwise.
+(define_insn "elf_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_ELF && ! TARGET_64BIT"
+ "@
+ {cal|la} %0,%2@l(%1)
+ {ai|addic} %0,%1,%K2")
+
+;; Mach-O PIC trickery.
+;; Darwin equivalent of elf_high, using ha16() relocations.
+(define_insn "macho_high"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=b*r")
+ (high:SI (match_operand 1 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{liu|lis} %0,ha16(%1)")
+
+;; Darwin equivalent of elf_low, using lo16() relocations.
+(define_insn "macho_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lo_sum:SI (match_operand:SI 1 "gpc_reg_operand" "b,!*r")
+ (match_operand 2 "" "")))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "@
+ {cal %0,%a2@l(%1)|la %0,lo16(%2)(%1)}
+ {cal %0,%a2@l(%1)|addic %0,%1,lo16(%2)}")
+
+;; Set up a register with a value from the GOT table
+
+;; Load a symbol's address from the GOT (SVR4 ABI, -fpic). A CONST with
+;; a nonzero offset is split into a GOT load of the bare symbol plus an
+;; explicit add of the offset. Operand 2 is the GOT pointer register.
+(define_expand "movsi_got"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_operand" "")
+ (match_dup 2)] 8))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "
+{
+ if (GET_CODE (operands[1]) == CONST)
+ {
+ rtx offset = const0_rtx;
+ HOST_WIDE_INT value;
+
+ operands[1] = eliminate_constant_term (XEXP (operands[1], 0), &offset);
+ value = INTVAL (offset);
+ if (value != 0)
+ {
+ rtx tmp = (no_new_pseudos ? operands[0] : gen_reg_rtx (Pmode));
+ emit_insn (gen_movsi_got (tmp, operands[1]));
+ emit_insn (gen_addsi3 (operands[0], tmp, offset));
+ DONE;
+ }
+ }
+
+ operands[2] = rs6000_got_register (operands[1]);
+}")
+
+;; The actual GOT load: a word load at the symbol's GOT slot.
+(define_insn "*movsi_got_internal"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "gpc_reg_operand" "b")] 8))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1"
+ "{l|lwz} %0,%a1@got(%2)"
+ [(set_attr "type" "load")])
+
+;; Used by sched, shorten_branches and final when the GOT pseudo reg
+;; didn't get allocated to a hard register.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (unspec:SI [(match_operand:SI 1 "got_no_const_operand" "")
+ (match_operand:SI 2 "memory_operand" "")] 8))]
+ "DEFAULT_ABI == ABI_V4
+ && flag_pic == 1
+ && (reload_in_progress || reload_completed)"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (unspec:SI [(match_dup 1)(match_dup 0)] 8))]
+ "")
+
+;; For SI, we special-case integers that can't be loaded in one insn. We
+;; do the load 16-bits at a time. We could do this by loading from memory,
+;; and this is even supposed to be faster, but it is simpler not to get
+;; integers in the TOC.
+;; SImode move expander; all the real work (forcing operands into
+;; registers, PIC handling, constant splitting) is in rs6000_emit_move.
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], SImode); DONE; }")
+
+;; Darwin lo_sum addressing: load a word through lo16(sym)(base).
+(define_insn "movsi_low"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (mem:SI (lo_sum:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+;; Store counterpart of movsi_low.
+(define_insn "movsi_low_st"
+ [(set (mem:SI (lo_sum:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:SI 0 "gpc_reg_operand" "r"))]
+ "TARGET_MACHO && ! TARGET_64BIT"
+ "{st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; DFmode lo_sum load. Alternative 0 is a single lfd; alternative 1
+;; loads into a GPR pair: lwz the low word, then rebuild the high-half
+;; address from the PIC base (operands2[3]) for the second lwz.
+(define_insn "movdf_low"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f,!r")
+ (mem:DF (lo_sum:SI (match_operand:SI 1 "register_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && ! TARGET_64BIT"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"lfd %0,lo16(%2)(%1)\";
+ case 1:
+ {
+ rtx operands2[4];
+ operands2[0] = operands[0];
+ operands2[1] = operands[1];
+ operands2[2] = operands[2];
+ /* operands2 adds the PIC base as %3; the first lwz only uses
+ %0/%1/%2, so plain operands suffices there.  */
+ operands2[3] = gen_rtx_REG (SImode, PIC_OFFSET_TABLE_REGNUM);
+ output_asm_insn (\"{l|lwz} %0,lo16(%2)(%1)\", operands);
+ /* We cannot rely on ha16(low half)==ha16(high half), alas,
+ although in practice it almost always is. */
+ output_asm_insn (\"{cau|addis} %L0,%3,ha16(%2+4)\", operands2);
+ return (\"{l|lwz} %L0,lo16(%2+4)(%L0)\");
+ }
+ default:
+ abort();
+ }
+}"
+ [(set_attr "type" "load")
+ (set_attr "length" "4,12")])
+
+;; Store counterpart of movdf_low (FPR source only).
+(define_insn "movdf_low_st"
+ [(set (mem:DF (lo_sum:SI (match_operand:SI 1 "register_operand" "b")
+ (match_operand 2 "" "")))
+ (match_operand:DF 0 "gpc_reg_operand" "f"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && ! TARGET_64BIT"
+ "stfd %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; SFmode lo_sum load, into an FPR (lfs) or a GPR (lwz).
+(define_insn "movsf_low"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f,!r")
+ (mem:SF (lo_sum:SI (match_operand:SI 1 "register_operand" "b,b")
+ (match_operand 2 "" ""))))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && ! TARGET_64BIT"
+ "@
+ lfs %0,lo16(%2)(%1)
+ {l|lwz} %0,lo16(%2)(%1)"
+ [(set_attr "type" "load")
+ (set_attr "length" "4")])
+
+;; Store counterpart of movsf_low.
+(define_insn "movsf_low_st"
+ [(set (mem:SF (lo_sum:SI (match_operand:SI 1 "register_operand" "b,b")
+ (match_operand 2 "" "")))
+ (match_operand:SF 0 "gpc_reg_operand" "f,!r"))]
+ "TARGET_MACHO && TARGET_HARD_FLOAT && ! TARGET_64BIT"
+ "@
+ stfs %0,lo16(%2)(%1)
+ {st|stw} %0,lo16(%2)(%1)"
+ [(set_attr "type" "store")
+ (set_attr "length" "4")])
+
+;; Workhorse SImode move: reg-reg, memory, small immediates (I/L),
+;; arbitrary constants (n, split later -- hence the "#"), and the special
+;; registers (*q = MQ, *c/*l = CTR/LR, *h = other SPRs; "cror 0,0,0" is a
+;; no-op for the SPR self-move alternative).
+(define_insn "*movsi_internal1"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m,r,r,r,r,r,*q,*c*l,*h,*h")
+ (match_operand:SI 1 "input_operand" "r,U,m,r,I,L,n,R,*h,r,r,r,0"))]
+ "gpc_reg_operand (operands[0], SImode)
+ || gpc_reg_operand (operands[1], SImode)"
+ "@
+ mr %0,%1
+ {cal|la} %0,%a1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ #
+ {cal|la} %0,%a1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ mt%0 %1
+ cror 0,0,0"
+ [(set_attr "type" "*,*,load,store,*,*,*,*,*,*,mtjmpr,*,*")
+ (set_attr "length" "4,4,4,4,4,4,8,4,4,4,4,4,4")])
+
+;; Split a load of a large constant into the appropriate two-insn
+;; sequence: lis of the high 16 bits, then ori of the low 16 bits.
+;; The condition matches constants that fit neither li nor lis alone.
+
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "(unsigned HOST_WIDE_INT) (INTVAL (operands[1]) + 0x8000) >= 0x10000
+ && (INTVAL (operands[1]) & 0xffff) != 0"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (ior:SI (match_dup 0)
+ (match_dup 3)))]
+ "
+{
+ operands[2] = GEN_INT (INTVAL (operands[1]) & (~ (HOST_WIDE_INT) 0xffff));
+ operands[3] = GEN_INT (INTVAL (operands[1]) & 0xffff);
+}")
+
+;; SImode move that also sets a CR field from a compare with zero:
+;; "mr." when the target is CR0 (x), otherwise split (below) into a
+;; plain move followed by an explicit compare.
+(define_insn "*movsi_internal2"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r") (match_dup 1))]
+ "! TARGET_POWERPC64"
+ "@
+ mr. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "") (match_dup 1))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; HImode and QImode moves; both defer the hard work to rs6000_emit_move.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], HImode); DONE; }")
+
+;; %w1 prints the low 16 bits of the constant sign-extended.
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:HI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], HImode)
+ || gpc_reg_operand (operands[1], HImode)"
+ "@
+ mr %0,%1
+ lhz%U1%X1 %0,%1
+ sth%U0%X0 %1,%0
+ {lil|li} %0,%w1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ cror 0,0,0"
+ [(set_attr "type" "*,load,store,*,*,*,mtjmpr,*")])
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], QImode); DONE; }")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,m,r,r,*q,*c*l,*h")
+ (match_operand:QI 1 "input_operand" "r,m,r,i,*h,r,r,0"))]
+ "gpc_reg_operand (operands[0], QImode)
+ || gpc_reg_operand (operands[1], QImode)"
+ "@
+ mr %0,%1
+ lbz%U1%X1 %0,%1
+ stb%U0%X0 %1,%0
+ {lil|li} %0,%1
+ mf%1 %0
+ mt%0 %1
+ mt%0 %1
+ cror 0,0,0"
+ [(set_attr "type" "*,load,store,*,*,*,mtjmpr,*")])
+
+;; Here is how to move condition codes around. When we store CC data in
+;; an integer register or memory, we store just the high-order 4 bits.
+;; This lets us not shift in the most common case of CR0.
+(define_expand "movcc"
+ [(set (match_operand:CC 0 "nonimmediate_operand" "")
+ (match_operand:CC 1 "nonimmediate_operand" ""))]
+ ""
+ "")
+
+;; %F0/%f0 print rotate counts that position a CR field; %R0 prints the
+;; mtcrf field mask.  Alternative 2 (GPR -> non-CR0 field) rotates the
+;; field into place, moves it, and rotates the GPR back.
+;; NOTE(review): the last (store) template uses %U0%U1; a %U modifier on
+;; the *source* of a store looks like a typo for %X0 -- confirm against a
+;; later rs6000.md before changing.
+(define_insn ""
+ [(set (match_operand:CC 0 "nonimmediate_operand" "=y,x,y,r,r,r,r,m")
+ (match_operand:CC 1 "nonimmediate_operand" "y,r,r,x,y,r,m,r"))]
+ "register_operand (operands[0], CCmode)
+ || register_operand (operands[1], CCmode)"
+ "@
+ mcrf %0,%1
+ mtcrf 128,%1
+ {rlinm|rlwinm} %1,%1,%F0,0xffffffff\;mtcrf %R0,%1\;{rlinm|rlwinm} %1,%1,%f0,0xffffffff
+ mfcr %0
+ mfcr %0\;{rlinm|rlwinm} %0,%0,%f1,0xf0000000
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%U1|stw%U0%U1} %1,%0"
+ [(set_attr "type" "*,*,*,compare,*,*,load,store")
+ (set_attr "length" "*,*,12,*,8,*,*,*")])
+
+;; For floating-point, we normally deal with the floating-point registers
+;; unless -msoft-float is used. The sole exception is that parameter passing
+;; can produce floating-point values in fixed-point registers. Unless the
+;; value is a simple constant or already in memory, we deal with this by
+;; allocating memory and copying the value explicitly via that memory location.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "")
+ (match_operand:SF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], SFmode); DONE; }")
+
+;; After reload, rewrite an SF constant destined for a GPR (regno <= 31)
+;; as an SImode move of its target bit pattern.
+(define_split
+ [(set (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "const_double_operand" ""))]
+ "reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ long l;
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (rv, l);
+
+ if (! TARGET_POWERPC64)
+ operands[2] = operand_subword (operands[0], 0, 0, SFmode);
+ else
+ operands[2] = gen_lowpart (SImode, operands[0]);
+
+ operands[3] = GEN_INT (trunc_int_for_mode (l, SImode));
+}")
+
+;; SFmode moves.  GPR alternatives are discouraged (!r) but needed for
+;; parameter passing; "#" alternatives (constants into GPRs) are split
+;; by the pattern above.
+(define_insn "*movsf_hardfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=!r,!r,m,f,f,m,!r,!r")
+ (match_operand:SF 1 "input_operand" "r,m,r,f,m,f,G,Fn"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode)) && TARGET_HARD_FLOAT"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ fmr %0,%1
+ lfs%U1%X1 %0,%1
+ stfs%U0%X0 %1,%0
+ #
+ #"
+ [(set_attr "type" "*,load,store,fp,fpload,fpstore,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,8")])
+
+(define_insn "*movsf_softfloat"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,r")
+ (match_operand:SF 1 "input_operand" "r,m,r,I,L,R,G,Fn"))]
+ "(gpc_reg_operand (operands[0], SFmode)
+ || gpc_reg_operand (operands[1], SFmode)) && TARGET_SOFT_FLOAT"
+ "@
+ mr %0,%1
+ {l%U1%X1|lwz%U1%X1} %0,%1
+ {st%U0%X0|stw%U0%X0} %1,%0
+ {lil|li} %0,%1
+ {liu|lis} %0,%v1
+ {cal|la} %0,%a1
+ #
+ #"
+ [(set_attr "type" "*,load,store,*,*,*,*,*")
+ (set_attr "length" "4,4,4,4,4,4,4,8")])
+
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "")
+ (match_operand:DF 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], DFmode); DONE; }")
+
+;; After reload on 32-bit: DF const_int into a GPR pair becomes two
+;; SImode word moves.  operands[4] is the high word (sign extension of
+;; the value on a 32-bit host); operands[1] is reused as the low word.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT ((value & 0x7fffffff) - (value & 0x80000000));
+#endif
+}")
+
+;; After reload on 32-bit: DF const_double into a GPR pair becomes two
+;; SImode word moves of the target-format image.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "const_double_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = operand_subword (operands[0], endian, 0, DFmode);
+ operands[3] = operand_subword (operands[0], 1 - endian, 0, DFmode);
+ operands[4] = GEN_INT (trunc_int_for_mode (l[endian], SImode));
+ operands[5] = GEN_INT (trunc_int_for_mode (l[1 - endian], SImode));
+}")
+
+;; 64-bit variant: load the whole image as one DImode constant.
+(define_split
+ [(set (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "easy_fp_constant" ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && ((GET_CODE (operands[0]) == REG && REGNO (operands[0]) <= 31)
+ || (GET_CODE (operands[0]) == SUBREG
+ && GET_CODE (SUBREG_REG (operands[0])) == REG
+ && REGNO (SUBREG_REG (operands[0])) <= 31))"
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ int endian = (WORDS_BIG_ENDIAN == 0);
+ long l[2];
+ REAL_VALUE_TYPE rv;
+ HOST_WIDE_INT val;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (rv, operands[1]);
+ REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
+
+ operands[2] = gen_lowpart (DImode, operands[0]);
+ /* HIGHPART is lower memory address when WORDS_BIG_ENDIAN. */
+#if HOST_BITS_PER_WIDE_INT >= 64
+ val = ((HOST_WIDE_INT)(unsigned long)l[endian] << 32 |
+ ((HOST_WIDE_INT)(unsigned long)l[1 - endian]));
+
+ operands[3] = immed_double_const (val, -(val < 0), DImode);
+#else
+ operands[3] = immed_double_const (l[1 - endian], l[endian], DImode);
+#endif
+}")
+
+;; Don't have reload use general registers to load a constant. First,
+;; it might not work if the output operand is the equivalent of
+;; a non-offsettable memref, but also it is less efficient than loading
+;; the constant into an FP register, since it will probably be used there.
+;; The "??" is a kludge until we can figure out a more reasonable way
+;; of handling these non-offsettable values.
+;;
+;; GPR alternatives handle a DF value as a pair of 32-bit words; the
+;; non-offsettable cases temporarily bump the address register by 4 to
+;; reach the second word, then restore it.
+(define_insn "*movdf_hardfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=!r,??r,m,!r,!r,!r,f,f,m")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F,f,m,f"))]
+ "! TARGET_POWERPC64 && TARGET_HARD_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+ the first register operand 0 is the same as the second register
+ of operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ if (offsettable_memref_p (operands[1])
+ || (GET_CODE (operands[1]) == MEM
+ && (GET_CODE (XEXP (operands[1], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)))
+ {
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ }
+ else
+ {
+ rtx addreg;
+
+ /* Indexed address: adjust the base register to reach each word.  */
+ addreg = find_addr_reg (XEXP (operands[1], 0));
+ if (refers_to_regno_p (REGNO (operands[0]),
+ REGNO (operands[0]) + 1,
+ operands[1], 0))
+ {
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"{lx|lwzx} %0,%1\";
+ }
+ else
+ {
+ output_asm_insn (\"{lx|lwzx} %0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{lx|lwzx} %L0,%1\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ }
+ case 2:
+ if (offsettable_memref_p (operands[0])
+ || (GET_CODE (operands[0]) == MEM
+ && (GET_CODE (XEXP (operands[0], 0)) == LO_SUM
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_INC
+ || GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)))
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ else
+ {
+ rtx addreg;
+
+ addreg = find_addr_reg (XEXP (operands[0], 0));
+ output_asm_insn (\"{stx|stwx} %1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,4(%0)\", &addreg);
+ output_asm_insn (\"{stx|stwx} %L1,%0\", operands);
+ output_asm_insn (\"{cal|la} %0,-4(%0)\", &addreg);
+ return \"\";
+ }
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ case 6:
+ return \"fmr %0,%1\";
+ case 7:
+ return \"lfd%U1%X1 %0,%1\";
+ case 8:
+ return \"stfd%U0%X0 %1,%0\";
+ }
+}"
+ [(set_attr "type" "*,load,store,*,*,*,fp,fpload,fpstore")
+ (set_attr "length" "8,16,16,8,12,16,*,*,*")])
+
+;; Soft-float 32-bit DF move: GPR pairs only, offsettable memory assumed.
+(define_insn "*movdf_softfloat32"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F"))]
+ "! TARGET_POWERPC64 && TARGET_SOFT_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+ the first register operand 0 is the same as the second register of
+ operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ /* If the low-address word is used in the address, we must load
+ it last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is
+ known to be dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ case 2:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "*,load,store,*,*,*")
+ (set_attr "length" "8,8,8,8,12,16")])
+
+;; 64-bit DF moves: a single GPR holds the whole value, so ld/std suffice;
+;; constants ("#") are split by the patterns above.
+(define_insn "*movdf_hardfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=!r,??r,m,!r,!r,!r,f,f,m")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F,f,m,f"))]
+ "TARGET_POWERPC64 && TARGET_HARD_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ #
+ #
+ #
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0"
+ [(set_attr "type" "*,load,store,*,*,*,fp,fpload,fpstore")
+ (set_attr "length" "4,4,4,8,12,16,4,4,4")])
+
+(define_insn "*movdf_softfloat64"
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,r,r,r")
+ (match_operand:DF 1 "input_operand" "r,m,r,G,H,F"))]
+ "TARGET_POWERPC64 && TARGET_SOFT_FLOAT
+ && (gpc_reg_operand (operands[0], DFmode)
+ || gpc_reg_operand (operands[1], DFmode))"
+ "@
+ mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ #
+ #
+ #"
+ [(set_attr "type" "*,load,store,*,*,*")
+ (set_attr "length" "*,*,*,8,12,16")])
+
+;; TFmode is the AIX 128-bit IBM long double: a pair of DFmode registers
+;; (%0/%L0).  Only available under the AIX ABI with -mlong-double-128.
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "any_operand" ""))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "{ rs6000_emit_move (operands[0], operands[1], TFmode); DONE; }")
+
+(define_insn "*movtf_internal"
+ [(set (match_operand:TF 0 "nonimmediate_operand" "=f,f,m,!r,!r,!r")
+ (match_operand:TF 1 "input_operand" "f,m,f,G,H,F"))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128
+ && (gpc_reg_operand (operands[0], TFmode)
+ || gpc_reg_operand (operands[1], TFmode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+ the first register operand 0 is the same as the second register of
+ operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fmr %L0,%L1\;fmr %0,%1\";
+ else
+ return \"fmr %0,%1\;fmr %L0,%L1\";
+ case 1:
+ return \"lfd %0,%1\;lfd %L0,%L1\";
+ case 2:
+ return \"stfd %1,%0\;stfd %L1,%L0\";
+ case 3:
+ case 4:
+ case 5:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "fp,fpload,fpstore,*,*,*")
+ (set_attr "length" "8,8,8,12,16,20")])
+
+;; Load a TF constant by loading a DF approximation and extending it.
+;; NOTE(review): operands[2] (the DF subword of the constant) is computed
+;; but never referenced by the replacement pattern, while
+;; (set (match_dup 3) (match_dup 1)) assigns the TFmode constant to the
+;; DFmode temporary -- this looks like it should be (match_dup 2).
+;; Confirm against a later rs6000.md before changing.
+(define_split
+ [(set (match_operand:TF 0 "gpc_reg_operand" "")
+ (match_operand:TF 1 "const_double_operand" ""))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ [(set (match_dup 3) (match_dup 1))
+ (set (match_dup 0)
+ (float_extend:TF (match_dup 3)))]
+ "
+{
+ operands[2] = operand_subword (operands[1], 0, 0, DFmode);
+ operands[3] = gen_reg_rtx (DFmode);
+}")
+
+;; Extend DFmode to TFmode (IBM long double = pair of doubles): the value
+;; lives in the high double; the low double (register REGNO(%0)+1) is
+;; cleared to zero by the split.
+;; NOTE(review): when %0 and %1 are not tied, nothing copies %1 into the
+;; high double -- confirm whether an explicit copy is needed here.
+(define_insn_and_split "extenddftf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (float_extend:TF (match_operand:DF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "#"
+ ""
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ /* Fixed: the original wrote REGNO (operands[0] + 1), which offsets the
+ rtx pointer itself (reading past the operands array) instead of
+ incrementing the hard register number.  */
+ operands[2] = gen_rtx_REG (DFmode, REGNO (operands[0]) + 1);
+ operands[3] = CONST0_RTX (DFmode);
+}")
+
+;; Extend SFmode to TFmode; as with extenddftf2, the split zeroes the
+;; second register of the TF pair.
+(define_insn_and_split "extendsftf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (float_extend:TF (match_operand:SF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "#"
+ ""
+ [(set (match_dup 2) (match_dup 3))]
+ "
+{
+ /* Fixed: REGNO (operands[0] + 1) -> REGNO (operands[0]) + 1; the
+ original did pointer arithmetic on the rtx instead of computing the
+ next hard register number.  */
+ operands[2] = gen_rtx_REG (SFmode, REGNO (operands[0]) + 1);
+ operands[3] = CONST0_RTX (SFmode);
+}")
+
+;; Truncate TF to DF: for IBM long double the rounded sum of the two
+;; component doubles is the correctly-rounded DF value.
+;; NOTE(review): "fadd" is a single 4-byte insn, yet length is 8 --
+;; possibly a conservative leftover; confirm before tightening.
+(define_insn "trunctfdf2"
+ [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "fadd %0,%1,%L1"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+;; TF -> SF goes through DF.
+(define_insn_and_split "trunctfsf2"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=f")
+ (float_truncate:SF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "#"
+ ""
+ [(set (match_dup 2)
+ (float_truncate:DF (match_dup 1)))
+ (set (match_dup 0)
+ (float_truncate:SF (match_dup 2)))]
+ "
+{
+ operands[2] = gen_reg_rtx (DFmode);
+}")
+
+;; Integer -> TF conversions go through DF and a widening extend.
+(define_expand "floatditf2"
+ [(set (match_dup 2)
+ (float:DF (match_operand:DI 1 "gpc_reg_operand" "")))
+ (set (match_operand:TF 0 "gpc_reg_operand" "")
+ (float_extend:TF (match_dup 2)))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_POWERPC64
+ && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "{ operands[2] = gen_reg_rtx (DFmode); }")
+
+(define_expand "floatsitf2"
+ [(set (match_dup 2)
+ (float:DF (match_operand:SI 1 "gpc_reg_operand" "")))
+ (set (match_operand:TF 0 "gpc_reg_operand" "")
+ (float_extend:TF (match_dup 2)))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "{ operands[2] = gen_reg_rtx (DFmode); }")
+
+;; TF -> DI conversion: truncate to DF, then fix to DImode.
+(define_expand "fix_trunctfdi2"
+ [(set (match_dup 2)
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ ;; Fixed: was (fix:SI ...), an RTL mode mismatch against the DImode
+ ;; destination; the SI twin below correctly pairs fix:SI with an SI
+ ;; destination.
+ (fix:DI (match_dup 2)))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_POWERPC64
+ && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "{ operands[2] = gen_reg_rtx (DFmode); }")
+
+;; TF -> SI conversion: truncate to DF, then fix to SImode.
+(define_expand "fix_trunctfsi2"
+ [(set (match_dup 2)
+ (float_truncate:DF (match_operand:TF 1 "gpc_reg_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (fix:SI (match_dup 2)))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "{ operands[2] = gen_reg_rtx (DFmode); }")
+
+;; Sign operations on TF apply the FP insn to both halves of the pair;
+;; copy order is reversed when the register pairs overlap.
+(define_insn "negtf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (neg:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fneg %L0,%L1\;fneg %0,%1\";
+ else
+ return \"fneg %0,%1\;fneg %L0,%L1\";
+}"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+(define_insn "abstf2"
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (abs:TF (match_operand:TF 1 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fabs %L0,%L1\;fabs %0,%1\";
+ else
+ return \"fabs %0,%1\;fabs %L0,%L1\";
+}"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+;; -|x| via fnabs on both halves.
+(define_insn ""
+ [(set (match_operand:TF 0 "gpc_reg_operand" "=f")
+ (neg:TF (abs:TF (match_operand:TF 1 "gpc_reg_operand" "f"))))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "*
+{
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"fnabs %L0,%L1\;fnabs %0,%1\";
+ else
+ return \"fnabs %0,%1\;fnabs %L0,%L1\";
+}"
+ [(set_attr "type" "fp")
+ (set_attr "length" "8")])
+
+;; Next come the multi-word integer load and store and the load and store
+;; multiple insns.
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "any_operand" ""))]
+ ""
+ "{ rs6000_emit_move (operands[0], operands[1], DImode); DONE; }")
+
+;; 32-bit DImode move: GPR pairs (two insns), FPRs (lfd/stfd hold 64
+;; bits), and constants which are split later ("#").
+(define_insn "*movdi_internal32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,f,f,m,r,r,r,r,r")
+ (match_operand:DI 1 "input_operand" "r,m,r,f,m,f,IJK,n,G,H,F"))]
+ "! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ /* We normally copy the low-numbered register first. However, if
+ the first register operand 0 is the same as the second register of
+ operand 1, we must copy in the opposite order. */
+ if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+ return \"mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\";
+ case 1:
+ /* If the low-address word is used in the address, we must load it
+ last. Otherwise, load it first. Note that we cannot have
+ auto-increment in that case since the address register is known to be
+ dead. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %0,%1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\";
+ case 2:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\";
+ case 3:
+ return \"fmr %0,%1\";
+ case 4:
+ return \"lfd%U1%X1 %0,%1\";
+ case 5:
+ return \"stfd%U0%X0 %1,%0\";
+ case 6:
+ case 7:
+ case 8:
+ case 9:
+ case 10:
+ return \"#\";
+ }
+}"
+ [(set_attr "type" "*,load,store,fp,fpload,fpstore,*,*,*,*,*")
+ (set_attr "length" "8,8,8,*,*,*,8,12,8,12,16")])
+
+;; After reload on 32-bit: split a DI constant into two SI word moves,
+;; high word first (operands[4]), low word second.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 1))]
+ "
+{
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ DImode);
+#if HOST_BITS_PER_WIDE_INT == 32
+ operands[4] = (value & 0x80000000) ? constm1_rtx : const0_rtx;
+#else
+ operands[4] = GEN_INT (value >> 32);
+ operands[1] = GEN_INT ((value & 0x7fffffff) - (value & 0x80000000));
+#endif
+}")
+
+;; Same, for a const_double carrying a 64-bit value on a 32-bit host.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_double_operand" ""))]
+ "HOST_BITS_PER_WIDE_INT == 32 && ! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "
+{
+ operands[2] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN == 0,
+ DImode);
+ operands[3] = operand_subword_force (operands[0], WORDS_BIG_ENDIAN != 0,
+ DImode);
+ operands[4] = GEN_INT (CONST_DOUBLE_HIGH (operands[1]));
+ operands[5] = GEN_INT (CONST_DOUBLE_LOW (operands[1]));
+}")
+
+;; Native 64-bit DImode move; the "nF" constant alternative ("#") is
+;; split into up to five insns, hence length 20.
+(define_insn "*movdi_internal64"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,m,r,r,r,r,f,f,m,r,*h,*h")
+ (match_operand:DI 1 "input_operand" "r,m,r,I,L,nF,R,f,m,f,*h,r,0"))]
+ "TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], DImode)
+ || gpc_reg_operand (operands[1], DImode))"
+ "@
+ mr %0,%1
+ ld%U1%X1 %0,%1
+ std%U0%X0 %1,%0
+ li %0,%1
+ lis %0,%v1
+ #
+ {cal|la} %0,%a1
+ fmr %0,%1
+ lfd%U1%X1 %0,%1
+ stfd%U0%X0 %1,%0
+ mf%1 %0
+ mt%0 %1
+ cror 0,0,0"
+ [(set_attr "type" "*,load,store,*,*,*,*,fp,fpload,fpstore,*,mtjmpr,*")
+ (set_attr "length" "4,4,4,4,4,20,4,4,4,4,4,4,4")])
+
+;; immediate value valid for a single instruction hiding in a const_double
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operand:DI 1 "const_double_operand" "F"))]
+ "HOST_BITS_PER_WIDE_INT == 32 && TARGET_POWERPC64
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && num_insns_constant (operands[1], DImode) == 1"
+ "*
+{
+  return ((unsigned HOST_WIDE_INT)
+	  (CONST_DOUBLE_LOW (operands[1]) + 0x8000) < 0x10000)
+    ? \"li %0,%1\" : \"lis %0,%v1\";
+}")
+
+;; Generate all one-bits and clear left or right.
+;; Use (and:DI (rotate:DI ...)) to avoid anddi3 unnecessary clobber.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "mask64_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (const_int -1))
+ (set (match_dup 0)
+ (and:DI (rotate:DI (match_dup 0)
+ (const_int 0))
+ (match_dup 1)))]
+ "")
+
+;; Split a load of a large constant into the appropriate five-instruction
+;; sequence. Handle anything in a constant number of insns.
+;; When non-easy constants can go in the TOC, this should use
+;; easy_fp_constant predicate.
+;; rs6000_emit_set_const emits the whole sequence itself; on success the
+;; preparation statements DONE out, so the replacement pattern (and its
+;; uninitialized match_dup 2/3) is never actually used.
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_int_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "const_double_operand" ""))]
+ "TARGET_POWERPC64 && num_insns_constant (operands[1], DImode) > 1"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 3)))]
+ "
+{ rtx tem = rs6000_emit_set_const (operands[0], DImode, operands[1], 5);
+
+ if (tem == operands[0])
+ DONE;
+ else
+ FAIL;
+}")
+
+;; DImode move that also sets a CR field from a compare with zero:
+;; "mr." when the target is CR0, otherwise split into move + compare.
+;; (The original comment here was a stray copy of the constant-split
+;; comment above.)
+(define_insn "*movdi_internal2"
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r") (match_dup 1))]
+ "TARGET_POWERPC64"
+ "@
+ mr. %0,%1
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "4,8")])
+
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "") (match_dup 1))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; TImode is similar, except that we usually want to compute the address into
+;; a register and use lsi/stsi (the exception is during reload). MQ is also
+;; clobbered in stsi for POWER, so we need a SCRATCH for it.
+(define_expand "movti"
+ [(parallel [(set (match_operand:TI 0 "general_operand" "")
+ (match_operand:TI 1 "general_operand" ""))
+ (clobber (scratch:SI))])]
+ "TARGET_STRING || TARGET_POWERPC64"
+ "{ rs6000_emit_move (operands[0], operands[1], TImode); DONE; }")
+
+;; We say that MQ is clobbered in the last alternative because the first
+;; alternative would never get used otherwise since it would need a reload
+;; while the 2nd alternative would not. We put memory cases first so they
+;; are preferred. Otherwise, we'd try to reload the output instead of
+;; giving the SCRATCH mq.
+(define_insn "*movti_power"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=Q,m,????r,????r,????r")
+ (match_operand:TI 1 "reg_or_mem_operand" "r,r,r,Q,m"))
+ (clobber (match_scratch:SI 2 "=q,q#X,X,X,X"))]
+ "TARGET_STRING && TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+
+ case 0:
+ return \"{stsi|stswi} %1,%P0,16\";
+
+ case 1:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\;{st|stw} %Y1,%Y0\;{st|stw} %Z1,%Z0\";
+
+ case 2:
+ /* Normally copy registers with lowest numbered register copied first.
+ But copy in the other order if the first register of the output
+ is the second, third, or fourth register in the input. */
+ if (REGNO (operands[0]) >= REGNO (operands[1]) + 1
+ && REGNO (operands[0]) <= REGNO (operands[1]) + 3)
+ return \"mr %Z0,%Z1\;mr %Y0,%Y1\;mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1\";
+ case 3:
+ /* If the address is not used in the output, we can use lsi. Otherwise,
+ fall through to generating four loads. */
+ if (! reg_overlap_mentioned_p (operands[0], operands[1]))
+ return \"{lsi|lswi} %0,%P1,16\";
+ /* ... fall through ... */
+ case 4:
+ /* If the address register is the same as the register for the lowest-
+ addressed word, load it last. Similarly for the next two words.
+ Otherwise load lowest address to highest. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\;{l|lwz} %0,%1\";
+ else if (refers_to_regno_p (REGNO (operands[0]) + 1,
+ REGNO (operands[0]) + 2, operands[1], 0))
+ return \"{l|lwz} %0,%1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\;{l|lwz} %L0,%L1\";
+ else if (refers_to_regno_p (REGNO (operands[0]) + 2,
+ REGNO (operands[0]) + 3, operands[1], 0))
+ return \"{l|lwz} %0,%1\;{l|lwz} %L0,%L1\;{l|lwz} %Z0,%Z1\;{l|lwz} %Y0,%Y1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\";
+ }
+}"
+ [(set_attr "type" "store,store,*,load,load")
+ (set_attr "length" "*,16,16,*,16")])
+
+;; PowerPC (non-POWER) TImode move: no lsi/stsi string forms, so always
+;; four lwz/stw, with the same overlap-ordering rules as *movti_power.
+(define_insn "*movti_string"
+ [(set (match_operand:TI 0 "reg_or_mem_operand" "=m,????r,????r")
+ (match_operand:TI 1 "reg_or_mem_operand" "r,r,m"))
+ (clobber (match_scratch:SI 2 "=X,X,X"))]
+ "TARGET_STRING && ! TARGET_POWER && ! TARGET_POWERPC64
+ && (gpc_reg_operand (operands[0], TImode) || gpc_reg_operand (operands[1], TImode))"
+ "*
+{
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+
+ case 0:
+ return \"{st%U0|stw%U0} %1,%0\;{st|stw} %L1,%L0\;{st|stw} %Y1,%Y0\;{st|stw} %Z1,%Z0\";
+
+ case 1:
+ /* Normally copy registers with lowest numbered register copied first.
+ But copy in the other order if the first register of the output
+ is the second, third, or fourth register in the input. */
+ if (REGNO (operands[0]) >= REGNO (operands[1]) + 1
+ && REGNO (operands[0]) <= REGNO (operands[1]) + 3)
+ return \"mr %Z0,%Z1\;mr %Y0,%Y1\;mr %L0,%L1\;mr %0,%1\";
+ else
+ return \"mr %0,%1\;mr %L0,%L1\;mr %Y0,%Y1\;mr %Z0,%Z1\";
+ case 2:
+ /* If the address register is the same as the register for the lowest-
+ addressed word, load it last. Similarly for the next two words.
+ Otherwise load lowest address to highest. */
+ if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+ operands[1], 0))
+ return \"{l|lwz} %L0,%L1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\;{l|lwz} %0,%1\";
+ else if (refers_to_regno_p (REGNO (operands[0]) + 1,
+ REGNO (operands[0]) + 2, operands[1], 0))
+ return \"{l|lwz} %0,%1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\;{l|lwz} %L0,%L1\";
+ else if (refers_to_regno_p (REGNO (operands[0]) + 2,
+ REGNO (operands[0]) + 3, operands[1], 0))
+ return \"{l|lwz} %0,%1\;{l|lwz} %L0,%L1\;{l|lwz} %Z0,%Z1\;{l|lwz} %Y0,%Y1\";
+ else
+ return \"{l%U1|lwz%U1} %0,%1\;{l|lwz} %L0,%L1\;{l|lwz} %Y0,%Y1\;{l|lwz} %Z0,%Z1\";
+ }
+}"
+ [(set_attr "type" "store,*,load")
+ (set_attr "length" "16,16,16")])
+
+;; TImode move on 64-bit PowerPC: a TImode value is two 64-bit GPRs
+;; (%0/%L0), moved with two ld/std/mr instructions.  As in the 32-bit
+;; patterns, the two halves are ordered so an overlapping source register
+;; (or the address register of a load) is read before being clobbered.
+(define_insn "*movti_ppc64"
+  [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,m")
+	(match_operand:TI 1 "input_operand" "r,m,r"))]
+  "TARGET_POWERPC64 && (gpc_reg_operand (operands[0], TImode)
+   || gpc_reg_operand (operands[1], TImode))"
+  "*
+{
+  switch (which_alternative)
+    {
+    default:
+      abort ();
+    case 0:
+      /* We normally copy the low-numbered register first.  However, if
+	 the first register operand 0 is the same as the second register of
+	 operand 1, we must copy in the opposite order.  */
+      if (REGNO (operands[0]) == REGNO (operands[1]) + 1)
+	return \"mr %L0,%L1\;mr %0,%1\";
+      else
+	return \"mr %0,%1\;mr %L0,%L1\";
+    case 1:
+      /* If the low-address word is used in the address, we must load it
+	 last.  Otherwise, load it first.  Note that we cannot have
+	 auto-increment in that case since the address register is known to be
+	 dead.  */
+      if (refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
+			     operands[1], 0))
+	return \"ld %L0,%L1\;ld %0,%1\";
+      else
+	return \"ld%U1 %0,%1\;ld %L0,%L1\";
+    case 2:
+      return \"std%U0 %1,%0\;std %L1,%L0\";
+    }
+}"
+  [(set_attr "type" "*,load,store")
+   (set_attr "length" "8,8,8")])
+
+;; Expander for the load_multiple standard pattern: build a PARALLEL of
+;; word loads (one SET per register) for the lmw/lswi-style insn below.
+;; Only constant counts of 3..8 words into GPRs are accepted; everything
+;; else FAILs so the middle end falls back to discrete moves.  The memory
+;; address is forced into a base register so each element can be addressed
+;; at a fixed word offset.
+(define_expand "load_multiple"
+  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+			  (match_operand:SI 1 "" ""))
+		     (use (match_operand:SI 2 "" ""))])]
+  "TARGET_STRING && !TARGET_POWERPC64"
+  "
+{
+  int regno;
+  int count;
+  rtx op1;
+  int i;
+
+  /* Support only loading a constant number of fixed-point registers from
+     memory and only bother with this if more than two; the machine
+     doesn't support more than eight.  */
+  if (GET_CODE (operands[2]) != CONST_INT
+      || INTVAL (operands[2]) <= 2
+      || INTVAL (operands[2]) > 8
+      || GET_CODE (operands[1]) != MEM
+      || GET_CODE (operands[0]) != REG
+      || REGNO (operands[0]) >= 32)
+    FAIL;
+
+  count = INTVAL (operands[2]);
+  regno = REGNO (operands[0]);
+
+  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
+  op1 = replace_equiv_address (operands[1],
+			       force_reg (SImode, XEXP (operands[1], 0)));
+
+  for (i = 0; i < count; i++)
+    XVECEXP (operands[3], 0, i)
+      = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno + i),
+		     adjust_address (op1, SImode, i * 4));
+}")
+
+;; Matcher/output for the load_multiple PARALLEL.  The common case emits a
+;; single lswi ({lsi|lswi} %1,%2,%N0; %N prints the byte count).  The bulk
+;; of the C fragment handles the awkward case where the base-address
+;; register was allocated to one of the destination registers: depending on
+;; which word would clobber it, either the address register's word is loaded
+;; last, or the string move is abandoned and discrete lwz's are emitted.
+(define_insn ""
+  [(match_parallel 0 "load_multiple_operation"
+    [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+          (mem:SI (match_operand:SI 2 "gpc_reg_operand" "b")))])]
+  "TARGET_STRING"
+  "*
+{
+  /* We have to handle the case where the pseudo used to contain the address
+     is assigned to one of the output registers.  */
+  int i, j;
+  int words = XVECLEN (operands[0], 0);
+  rtx xop[10];
+
+  if (XVECLEN (operands[0], 0) == 1)
+    return \"{l|lwz} %1,0(%2)\";
+
+  for (i = 0; i < words; i++)
+    if (refers_to_regno_p (REGNO (operands[1]) + i,
+			   REGNO (operands[1]) + i + 1, operands[2], 0))
+      {
+	if (i == words-1)
+	  {
+	    xop[0] = operands[1];
+	    xop[1] = operands[2];
+	    xop[2] = GEN_INT (4 * (words-1));
+	    output_asm_insn (\"{lsi|lswi} %0,%1,%2\;{l|lwz} %1,%2(%1)\", xop);
+	    return \"\";
+	  }
+	else if (i == 0)
+	  {
+	    xop[0] = operands[1];
+	    xop[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+	    xop[2] = GEN_INT (4 * (words-1));
+	    output_asm_insn (\"{cal %0,4(%0)|addi %0,%0,4}\;{lsi|lswi} %1,%0,%2\;{l|lwz} %0,-4(%0)\", xop);
+	    return \"\";
+	  }
+	else
+	  {
+	    for (j = 0; j < words; j++)
+	      if (j != i)
+		{
+		  xop[0] = gen_rtx_REG (SImode, REGNO (operands[1]) + j);
+		  xop[1] = operands[2];
+		  xop[2] = GEN_INT (j * 4);
+		  output_asm_insn (\"{l|lwz} %0,%2(%1)\", xop);
+		}
+	    xop[0] = operands[2];
+	    xop[1] = GEN_INT (i * 4);
+	    output_asm_insn (\"{l|lwz} %0,%1(%0)\", xop);
+	    return \"\";
+	  }
+      }
+
+  return \"{lsi|lswi} %1,%2,%N0\";
+}"
+  [(set_attr "type" "load")
+   (set_attr "length" "32")])
+
+
+;; Expander for the store_multiple standard pattern, mirror of
+;; load_multiple: a PARALLEL of word stores plus an SImode scratch clobber
+;; (the POWER stsi variant below needs the MQ-class scratch; the PowerPC
+;; variant clobbers nothing real).  Constant counts of 3..8 GPRs only.
+(define_expand "store_multiple"
+  [(match_par_dup 3 [(set (match_operand:SI 0 "" "")
+			  (match_operand:SI 1 "" ""))
+		     (clobber (scratch:SI))
+		     (use (match_operand:SI 2 "" ""))])]
+  "TARGET_STRING && !TARGET_POWERPC64"
+  "
+{
+  int regno;
+  int count;
+  rtx to;
+  rtx op0;
+  int i;
+
+  /* Support only storing a constant number of fixed-point registers to
+     memory and only bother with this if more than two; the machine
+     doesn't support more than eight.  */
+  if (GET_CODE (operands[2]) != CONST_INT
+      || INTVAL (operands[2]) <= 2
+      || INTVAL (operands[2]) > 8
+      || GET_CODE (operands[0]) != MEM
+      || GET_CODE (operands[1]) != REG
+      || REGNO (operands[1]) >= 32)
+    FAIL;
+
+  count = INTVAL (operands[2]);
+  regno = REGNO (operands[1]);
+
+  operands[3] = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count + 1));
+  to = force_reg (SImode, XEXP (operands[0], 0));
+  op0 = replace_equiv_address (operands[0], to);
+
+  XVECEXP (operands[3], 0, 0)
+    = gen_rtx_SET (VOIDmode, adjust_address (op0, SImode, 0), operands[1]);
+  XVECEXP (operands[3], 0, 1) = gen_rtx_CLOBBER (VOIDmode,
+						 gen_rtx_SCRATCH (SImode));
+
+  for (i = 1; i < count; i++)
+    XVECEXP (operands[3], 0, i + 1)
+      = gen_rtx_SET (VOIDmode,
+		     adjust_address (op0, SImode, i * 4),
+		     gen_rtx_REG (SImode, regno + i));
+}")
+
+;; Matchers for the store_multiple PARALLEL.  Two variants: on POWER the
+;; stsi form clobbers a "q"-class scratch (presumably the MQ register used
+;; by POWER string ops -- confirm against rs6000.h REG_CLASS definitions);
+;; on PowerPC the stswi form needs no real scratch ("X").
+(define_insn ""
+  [(match_parallel 0 "store_multiple_operation"
+    [(set (match_operand:SI 1 "indirect_operand" "=Q")
+          (match_operand:SI 2 "gpc_reg_operand" "r"))
+     (clobber (match_scratch:SI 3 "=q"))])]
+  "TARGET_STRING && TARGET_POWER"
+  "{stsi|stswi} %2,%P1,%O0"
+  [(set_attr "type" "store")])
+
+(define_insn ""
+  [(match_parallel 0 "store_multiple_operation"
+    [(set (mem:SI (match_operand:SI 1 "gpc_reg_operand" "b"))
+          (match_operand:SI 2 "gpc_reg_operand" "r"))
+     (clobber (match_scratch:SI 3 "X"))])]
+  "TARGET_STRING && ! TARGET_POWER"
+  "{stsi|stswi} %2,%1,%O0"
+  [(set_attr "type" "store")])
+
+
+;; String/block move insn.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+;; Standard block-move expander: all the work (choosing among the
+;; movstrsi_Nreg helpers below, or plain loads/stores) is done in
+;; expand_block_move in rs6000.c; FAIL lets the middle end emit a
+;; library call / byte loop instead.
+(define_expand "movstrsi"
+  [(parallel [(set (match_operand:BLK 0 "" "")
+		   (match_operand:BLK 1 "" ""))
+	      (use (match_operand:SI 2 "" ""))
+	      (use (match_operand:SI 3 "" ""))])]
+  ""
+  "
+{
+  if (expand_block_move (operands))
+    DONE;
+  else
+    FAIL;
+}")
+
+;; Move up to 32 bytes at a time.  The fixed registers are needed because the
+;; register allocator doesn't have a clue about allocating 8 word registers.
+;; rD/rS = r5 is preferred, efficient form.
+;; Three insn variants follow the expander: POWER (MQ-class "q" scratch),
+;; plain PowerPC, and 64-bit PowerPC (DI address registers).  All require
+;; the source/destination address registers to be outside r5..r12 so they
+;; are not clobbered by the lswi/stswi register block, and operand 4 pinned
+;; to r5.  The length condition accepts 25..31 bytes or 0; NOTE(review):
+;; the INTVAL == 0 case presumably encodes a 32-byte move (lswi's count
+;; field wraps 32 to 0) -- confirm against expand_block_move.
+(define_expand "movstrsi_8reg"
+  [(parallel [(set (match_operand 0 "" "")
+		   (match_operand 1 "" ""))
+	      (use (match_operand 2 "" ""))
+	      (use (match_operand 3 "" ""))
+	      (clobber (reg:SI  5))
+	      (clobber (reg:SI  6))
+	      (clobber (reg:SI  7))
+	      (clobber (reg:SI  8))
+	      (clobber (reg:SI  9))
+	      (clobber (reg:SI 10))
+	      (clobber (reg:SI 11))
+	      (clobber (reg:SI 12))
+	      (clobber (match_scratch:SI 4 ""))])]
+  "TARGET_STRING"
+  "")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (reg:SI 11))
+   (clobber (reg:SI 12))
+   (clobber (match_scratch:SI 5 "=q"))]
+  "TARGET_STRING && TARGET_POWER
+   && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+       || INTVAL (operands[2]) == 0)
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (reg:SI 11))
+   (clobber (reg:SI 12))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && ! TARGET_POWER
+   && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+      || INTVAL (operands[2]) == 0)
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (reg:SI 11))
+   (clobber (reg:SI 12))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && TARGET_POWERPC64
+   && ((INTVAL (operands[2]) > 24 && INTVAL (operands[2]) < 32)
+      || INTVAL (operands[2]) == 0)
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 12)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 12)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+;; Move up to 24 bytes at a time.  The fixed registers are needed because the
+;; register allocator doesn't have a clue about allocating 6 word registers.
+;; rD/rS = r5 is preferred, efficient form.
+;; Same three-variant structure as movstrsi_8reg, using r5..r10.
+;; NOTE(review): the POWER variant requires INTVAL <= 24, matching the
+;; comment, but the non-POWER and PowerPC64 variants below say <= 32.
+;; Lengths 25..32 would make lswi touch r11/r12, which these patterns do
+;; not clobber; 25..31 is normally caught first by the 8reg patterns above,
+;; but 32 is not.  Verify against expand_block_move whether lengths > 24
+;; can ever reach these patterns.
+(define_expand "movstrsi_6reg"
+  [(parallel [(set (match_operand 0 "" "")
+		   (match_operand 1 "" ""))
+	      (use (match_operand 2 "" ""))
+	      (use (match_operand 3 "" ""))
+	      (clobber (reg:SI  5))
+	      (clobber (reg:SI  6))
+	      (clobber (reg:SI  7))
+	      (clobber (reg:SI  8))
+	      (clobber (reg:SI  9))
+	      (clobber (reg:SI 10))
+	      (clobber (match_scratch:SI 4 ""))])]
+  "TARGET_STRING"
+  "")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (match_scratch:SI 5 "=q"))]
+  "TARGET_STRING && TARGET_POWER
+   && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 24
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && ! TARGET_POWER
+   && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI  6))
+   (clobber (reg:SI  7))
+   (clobber (reg:SI  8))
+   (clobber (reg:SI  9))
+   (clobber (reg:SI 10))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && TARGET_POWERPC64
+   && INTVAL (operands[2]) > 16 && INTVAL (operands[2]) <= 32
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 10)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 10)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+;; Move up to 16 bytes at a time, using 4 fixed registers to avoid spill
+;; problems with TImode.
+;; rD/rS = r5 is preferred, efficient form.
+;; Same structure as the 8reg/6reg families, pinned to r5..r8 for
+;; lengths 9..16.
+(define_expand "movstrsi_4reg"
+  [(parallel [(set (match_operand 0 "" "")
+		   (match_operand 1 "" ""))
+	      (use (match_operand 2 "" ""))
+	      (use (match_operand 3 "" ""))
+	      (clobber (reg:SI 5))
+	      (clobber (reg:SI 6))
+	      (clobber (reg:SI 7))
+	      (clobber (reg:SI 8))
+	      (clobber (match_scratch:SI 4 ""))])]
+  "TARGET_STRING"
+  "")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI 6))
+   (clobber (reg:SI 7))
+   (clobber (reg:SI 8))
+   (clobber (match_scratch:SI 5 "=q"))]
+  "TARGET_STRING && TARGET_POWER
+   && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI 6))
+   (clobber (reg:SI 7))
+   (clobber (reg:SI 8))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && ! TARGET_POWER
+   && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_operand:SI 4 "gpc_reg_operand" "=r"))
+   (clobber (reg:SI 6))
+   (clobber (reg:SI 7))
+   (clobber (reg:SI 8))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && TARGET_POWERPC64
+   && INTVAL (operands[2]) > 8 && INTVAL (operands[2]) <= 16
+   && (REGNO (operands[0]) < 5 || REGNO (operands[0]) > 8)
+   && (REGNO (operands[1]) < 5 || REGNO (operands[1]) > 8)
+   && REGNO (operands[4]) == 5"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+;; Move up to 8 bytes at a time.
+;; Uses a DImode scratch (a GPR pair, "&" earlyclobber) instead of fixed
+;; registers, so only on 32-bit targets; lengths 5..8.  POWER variant takes
+;; the "q"-class scratch, PowerPC variant needs none ("X").
+(define_expand "movstrsi_2reg"
+  [(parallel [(set (match_operand 0 "" "")
+		   (match_operand 1 "" ""))
+	      (use (match_operand 2 "" ""))
+	      (use (match_operand 3 "" ""))
+	      (clobber (match_scratch:DI 4 ""))
+	      (clobber (match_scratch:SI 5 ""))])]
+  "TARGET_STRING && ! TARGET_POWERPC64"
+  "")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_scratch:DI 4 "=&r"))
+   (clobber (match_scratch:SI 5 "=q"))]
+  "TARGET_STRING && TARGET_POWER && ! TARGET_POWERPC64
+   && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_scratch:DI 4 "=&r"))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && ! TARGET_POWER && ! TARGET_POWERPC64
+   && INTVAL (operands[2]) > 4 && INTVAL (operands[2]) <= 8"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+;; Move up to 4 bytes at a time.
+;; Single SImode earlyclobber scratch; lengths 1..4.  Variants for POWER
+;; ("q" scratch), plain PowerPC, and 64-bit PowerPC (DI addresses).
+(define_expand "movstrsi_1reg"
+  [(parallel [(set (match_operand 0 "" "")
+		   (match_operand 1 "" ""))
+	      (use (match_operand 2 "" ""))
+	      (use (match_operand 3 "" ""))
+	      (clobber (match_scratch:SI 4 ""))
+	      (clobber (match_scratch:SI 5 ""))])]
+  "TARGET_STRING"
+  "")
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_scratch:SI 4 "=&r"))
+   (clobber (match_scratch:SI 5 "=q"))]
+  "TARGET_STRING && TARGET_POWER
+   && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:SI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:SI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_scratch:SI 4 "=&r"))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && ! TARGET_POWER
+   && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+(define_insn ""
+  [(set (mem:BLK (match_operand:DI 0 "gpc_reg_operand" "b"))
+	(mem:BLK (match_operand:DI 1 "gpc_reg_operand" "b")))
+   (use (match_operand:SI 2 "immediate_operand" "i"))
+   (use (match_operand:SI 3 "immediate_operand" "i"))
+   (clobber (match_scratch:SI 4 "=&r"))
+   (clobber (match_scratch:SI 5 "X"))]
+  "TARGET_STRING && TARGET_POWERPC64
+   && INTVAL (operands[2]) > 0 && INTVAL (operands[2]) <= 4"
+  "{lsi|lswi} %4,%1,%2\;{stsi|stswi} %4,%0,%2"
+  [(set_attr "type" "load")
+   (set_attr "length" "8")])
+
+
+;; Define insns that do load or store with update.  Some of these we can
+;; get by using pre-decrement or pre-increment, but the hardware can also
+;; do cases where the increment is not the size of the object.
+;;
+;; In all these cases, we use operands 0 and 1 for the register being
+;; incremented because those are the operands that local-alloc will
+;; tie and these are the pair most likely to be tieable (and the ones
+;; that will benefit the most).
+;;
+;; Each pattern pairs a memory access at (base + offset) with writing
+;; base+offset back into the base register, matching the PowerPC/POWER
+;; "u"/"ux" update forms (register offset -> indexed form, 16-bit
+;; immediate -> displacement form).
+
+;; 64-bit load with update (ldux/ldu).
+(define_insn "*movdi_update1"
+  [(set (match_operand:DI 3 "gpc_reg_operand" "=r,r")
+	(mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:DI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
+	(plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_POWERPC64 && TARGET_UPDATE"
+  "@
+   ldux %3,%0,%2
+   ldu %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+;; Sign-extending 32->64-bit load with update; lwaux has only the indexed
+;; form, hence the single alternative and no TARGET_UPDATE test.
+(define_insn "*movdi_update2"
+  [(set (match_operand:DI 3 "gpc_reg_operand" "=r")
+	(sign_extend:DI
+	 (mem:SI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0")
+			  (match_operand:DI 2 "gpc_reg_operand" "r")))))
+   (set (match_operand:DI 0 "gpc_reg_operand" "=b")
+	(plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_POWERPC64"
+  "lwaux %3,%0,%2"
+  [(set_attr "type" "load")])
+
+;; 64-bit store with update (stdux/stdu); named because allocate_stack
+;; generates it directly.
+(define_insn "movdi_update"
+  [(set (mem:DI (plus:DI (match_operand:DI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:DI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:DI 3 "gpc_reg_operand" "r,r"))
+   (set (match_operand:DI 0 "gpc_reg_operand" "=b,b")
+	(plus:DI (match_dup 1) (match_dup 2)))]
+  "TARGET_POWERPC64 && TARGET_UPDATE"
+  "@
+   stdux %3,%0,%2
+   stdu %3,%2(%0)"
+  [(set_attr "type" "store")])
+
+;; 32-bit load with update (lux/lwzux).
+(define_insn "*movsi_update1"
+  [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+	(mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  ""
+  "@
+   {lux|lwzux} %3,%0,%2
+   {lu|lwzu} %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+;; 32-bit store with update; named because allocate_stack generates it.
+(define_insn "movsi_update"
+  [(set (mem:SI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:SI 3 "gpc_reg_operand" "r,r"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   {stux|stwux} %3,%0,%2
+   {stu|stwu} %3,%2(%0)"
+  [(set_attr "type" "store")])
+
+;; Half-word and byte load/store with update: plain, zero-extended, and
+;; (for HImode) sign-extended loads, plus the matching stores, mapping to
+;; lhzu(x)/lhau(x)/sthu(x)/lbzu(x)/stbu(x).
+(define_insn "*movhi_update"
+  [(set (match_operand:HI 3 "gpc_reg_operand" "=r,r")
+	(mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   lhzux %3,%0,%2
+   lhzu %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movhi_update2"
+  [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI
+	 (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			  (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   lhzux %3,%0,%2
+   lhzu %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movhi_update3"
+  [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+	(sign_extend:SI
+	 (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			  (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   lhaux %3,%0,%2
+   lhau %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movhi_update4"
+  [(set (mem:HI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:HI 3 "gpc_reg_operand" "r,r"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   sthux %3,%0,%2
+   sthu %3,%2(%0)"
+  [(set_attr "type" "store")])
+
+(define_insn "*movqi_update1"
+  [(set (match_operand:QI 3 "gpc_reg_operand" "=r,r")
+	(mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   lbzux %3,%0,%2
+   lbzu %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movqi_update2"
+  [(set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+	(zero_extend:SI
+	 (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			  (match_operand:SI 2 "reg_or_short_operand" "r,I")))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   lbzux %3,%0,%2
+   lbzu %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movqi_update3"
+  [(set (mem:QI (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:QI 3 "gpc_reg_operand" "r,r"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_UPDATE"
+  "@
+   stbux %3,%0,%2
+   stbu %3,%2(%0)"
+  [(set_attr "type" "store")])
+
+;; Floating-point load/store with update.  The SF patterns come in two
+;; flavors: hard-float FPR forms (lfsu(x)/stfsu(x)) and soft-float GPR
+;; forms that reuse the 32-bit integer update insns.  DF has only the
+;; hard-float FPR forms (lfdu(x)/stfdu(x)).
+(define_insn "*movsf_update1"
+  [(set (match_operand:SF 3 "gpc_reg_operand" "=f,f")
+	(mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_HARD_FLOAT && TARGET_UPDATE"
+  "@
+   lfsux %3,%0,%2
+   lfsu %3,%2(%0)"
+  [(set_attr "type" "fpload")])
+
+(define_insn "*movsf_update2"
+  [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:SF 3 "gpc_reg_operand" "f,f"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_HARD_FLOAT && TARGET_UPDATE"
+  "@
+   stfsux %3,%0,%2
+   stfsu %3,%2(%0)"
+  [(set_attr "type" "fpstore")])
+
+(define_insn "*movsf_update3"
+  [(set (match_operand:SF 3 "gpc_reg_operand" "=r,r")
+	(mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_SOFT_FLOAT && TARGET_UPDATE"
+  "@
+   {lux|lwzux} %3,%0,%2
+   {lu|lwzu} %3,%2(%0)"
+  [(set_attr "type" "load")])
+
+(define_insn "*movsf_update4"
+  [(set (mem:SF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:SF 3 "gpc_reg_operand" "r,r"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_SOFT_FLOAT && TARGET_UPDATE"
+  "@
+   {stux|stwux} %3,%0,%2
+   {stu|stwu} %3,%2(%0)"
+  [(set_attr "type" "store")])
+
+(define_insn "*movdf_update1"
+  [(set (match_operand:DF 3 "gpc_reg_operand" "=f,f")
+	(mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I"))))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_HARD_FLOAT && TARGET_UPDATE"
+  "@
+   lfdux %3,%0,%2
+   lfdu %3,%2(%0)"
+  [(set_attr "type" "fpload")])
+
+(define_insn "*movdf_update2"
+  [(set (mem:DF (plus:SI (match_operand:SI 1 "gpc_reg_operand" "0,0")
+			 (match_operand:SI 2 "reg_or_short_operand" "r,I")))
+	(match_operand:DF 3 "gpc_reg_operand" "f,f"))
+   (set (match_operand:SI 0 "gpc_reg_operand" "=b,b")
+	(plus:SI (match_dup 1) (match_dup 2)))]
+  "TARGET_HARD_FLOAT && TARGET_UPDATE"
+  "@
+   stfdux %3,%0,%2
+   stfdu %3,%2(%0)"
+  [(set_attr "type" "fpstore")])
+
+;; Peephole to convert two consecutive FP loads or stores into lfq/stfq.
+;; POWER2 only.  registers_ok_for_quad_peep / addrs_ok_for_quad_peep
+;; (in rs6000.c) check that the FPRs are an even/odd pair and the two
+;; addresses are adjacent doublewords; volatile accesses are excluded so
+;; the access width is not changed behind the programmer's back.
+(define_peephole
+  [(set (match_operand:DF 0 "gpc_reg_operand" "=f")
+	(match_operand:DF 1 "memory_operand" ""))
+   (set (match_operand:DF 2 "gpc_reg_operand" "=f")
+	(match_operand:DF 3 "memory_operand" ""))]
+  "TARGET_POWER2
+   && TARGET_HARD_FLOAT
+   && registers_ok_for_quad_peep (operands[0], operands[2])
+   && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+   && addrs_ok_for_quad_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
+  "lfq%U1%X1 %0,%1")
+
+(define_peephole
+  [(set (match_operand:DF 0 "memory_operand" "")
+	(match_operand:DF 1 "gpc_reg_operand" "f"))
+   (set (match_operand:DF 2 "memory_operand" "")
+	(match_operand:DF 3 "gpc_reg_operand" "f"))]
+  "TARGET_POWER2
+   && TARGET_HARD_FLOAT
+   && registers_ok_for_quad_peep (operands[1], operands[3])
+   && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+   && addrs_ok_for_quad_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+  "stfq%U0%X0 %1,%0")
+
+;; Next come insns related to the calling sequence.
+;;
+;; First, an insn to allocate new stack space for dynamic use (e.g., alloca).
+;; We move the back-chain and decrement the stack pointer.
+
+;; Allocate dynamic stack space (alloca): decrement r1 and keep the
+;; back-chain valid by storing the old back-chain at the new stack bottom
+;; (with a single store-with-update when TARGET_UPDATE).  The range check
+;; on operands[1] is deliberately asymmetric (-32767..32768) because the
+;; value is NEGATED before being used as a 16-bit signed displacement:
+;; -op1 must lie in [-32768, 32767].  When -fstack-limit is active, a
+;; conditional trap is emitted first.
+(define_expand "allocate_stack"
+  [(set (match_operand 0 "gpc_reg_operand" "=r")
+	(minus (reg 1) (match_operand 1 "reg_or_short_operand" "")))
+   (set (reg 1)
+	(minus (reg 1) (match_dup 1)))]
+  ""
+  "
+{ rtx chain = gen_reg_rtx (Pmode);
+  rtx stack_bot = gen_rtx_MEM (Pmode, stack_pointer_rtx);
+  rtx neg_op0;
+
+  emit_move_insn (chain, stack_bot);
+
+  /* Check stack bounds if necessary.  */
+  if (current_function_limit_stack)
+    {
+      rtx available;
+      available = expand_binop (Pmode, sub_optab,
+				stack_pointer_rtx, stack_limit_rtx,
+				NULL_RTX, 1, OPTAB_WIDEN);
+      emit_insn (gen_cond_trap (LTU, available, operands[1], const0_rtx));
+    }
+
+  if (GET_CODE (operands[1]) != CONST_INT
+      || INTVAL (operands[1]) < -32767
+      || INTVAL (operands[1]) > 32768)
+    {
+      neg_op0 = gen_reg_rtx (Pmode);
+      if (TARGET_32BIT)
+	emit_insn (gen_negsi2 (neg_op0, operands[1]));
+      else
+	emit_insn (gen_negdi2 (neg_op0, operands[1]));
+    }
+  else
+    neg_op0 = GEN_INT (- INTVAL (operands[1]));
+
+  if (TARGET_UPDATE)
+    emit_insn ((* ((TARGET_32BIT) ? gen_movsi_update : gen_movdi_update))
+	       (stack_pointer_rtx, stack_pointer_rtx, neg_op0, chain));
+
+  else
+    {
+      emit_insn ((* ((TARGET_32BIT) ? gen_addsi3 : gen_adddi3))
+		 (stack_pointer_rtx, stack_pointer_rtx, neg_op0));
+      emit_move_insn (gen_rtx_MEM (Pmode, stack_pointer_rtx), chain);
+    }
+
+  emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+  DONE;
+}")
+
+;; These patterns say how to save and restore the stack pointer.  We need not
+;; save the stack pointer at function level since we are careful to
+;; preserve the backchain.  At block level, we have to restore the backchain
+;; when we restore the stack pointer.
+;;
+;; For nonlocal gotos, we must save both the stack pointer and its
+;; backchain and restore both.  Note that in the nonlocal case, the
+;; save area is a memory location.
+
+;; Function-level save/restore are no-ops (backchain is always preserved).
+(define_expand "save_stack_function"
+  [(match_operand 0 "any_operand" "")
+   (match_operand 1 "any_operand" "")]
+  ""
+  "DONE;")
+
+(define_expand "restore_stack_function"
+  [(match_operand 0 "any_operand" "")
+   (match_operand 1 "any_operand" "")]
+  ""
+  "DONE;")
+
+;; Block-level restore: read the backchain through the old sp (op0), set
+;; the new sp (op1), then rewrite the backchain at the new stack bottom.
+(define_expand "restore_stack_block"
+  [(use (match_operand 0 "register_operand" ""))
+   (set (match_dup 2) (match_dup 3))
+   (set (match_dup 0) (match_operand 1 "register_operand" ""))
+   (set (match_dup 3) (match_dup 2))]
+  ""
+  "
+{
+  operands[2] = gen_reg_rtx (Pmode);
+  operands[3] = gen_rtx_MEM (Pmode, operands[0]);
+}")
+
+;; Nonlocal-goto save/restore: the save area holds two words -- the
+;; backchain in word 0 and the stack pointer in word 1.  The save area is
+;; addressed as a double-word aggregate, hence the DImode (32-bit) /
+;; TImode (64-bit) mode passed to operand_subword.
+(define_expand "save_stack_nonlocal"
+  [(match_operand 0 "memory_operand" "")
+   (match_operand 1 "register_operand" "")]
+  ""
+  "
+{
+  rtx temp = gen_reg_rtx (Pmode);
+
+  /* Copy the backchain to the first word, sp to the second.  */
+  emit_move_insn (temp, gen_rtx_MEM (Pmode, operands[1]));
+  emit_move_insn (operand_subword (operands[0], 0, 0,
+				   (TARGET_32BIT ? DImode : TImode)),
+		  temp);
+  emit_move_insn (operand_subword (operands[0], 1, 0, (TARGET_32BIT ? DImode : TImode)),
+		  operands[1]);
+  DONE;
+}")
+
+(define_expand "restore_stack_nonlocal"
+  [(match_operand 0 "register_operand" "")
+   (match_operand 1 "memory_operand" "")]
+  ""
+  "
+{
+  rtx temp = gen_reg_rtx (Pmode);
+
+  /* Restore the backchain from the first word, sp from the second.  */
+  emit_move_insn (temp,
+		  operand_subword (operands[1], 0, 0, (TARGET_32BIT ? DImode : TImode)));
+  emit_move_insn (operands[0],
+		  operand_subword (operands[1], 1, 0,
+				   (TARGET_32BIT ? DImode : TImode)));
+  emit_move_insn (gen_rtx_MEM (Pmode, operands[0]), temp);
+  DONE;
+}")
+
+;; TOC register handling.
+
+;; Code to initialize the TOC register...
+
+;; AIX ABI: load a TOC value through the TOC pointer (r2) using the
+;; internal label LCTOC..1; unspec 7 marks it as a TOC reference.  32-bit
+;; form uses lwz, 64-bit form uses ld (with an @toc relocation on ELF64).
+(define_insn "load_toc_aix_si"
+  [(parallel [(set (match_operand:SI 0 "register_operand" "=r")
+		   (unspec:SI [(const_int 0)] 7))
+	      (use (reg:SI 2))])]
+  "DEFAULT_ABI == ABI_AIX && TARGET_32BIT"
+  "*
+{
+  char buf[30];
+  ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+  operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+  operands[2] = gen_rtx_REG (Pmode, 2);
+  return \"{l|lwz} %0,%1(%2)\";
+}"
+  [(set_attr "type" "load")])
+
+(define_insn "load_toc_aix_di"
+  [(parallel [(set (match_operand:DI 0 "register_operand" "=r")
+		   (unspec:DI [(const_int 0)] 7))
+	      (use (reg:DI 2))])]
+  "DEFAULT_ABI == ABI_AIX && TARGET_64BIT"
+  "*
+{
+  char buf[30];
+  ASM_GENERATE_INTERNAL_LABEL (buf, \"LCTOC\", 1);
+  if (TARGET_ELF)
+    strcat (buf, \"@toc\");
+  operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
+  operands[2] = gen_rtx_REG (Pmode, 2);
+  return \"ld %0,%1(%2)\";
+}"
+  [(set_attr "type" "load")])
+
+(define_insn "load_toc_v4_pic_si"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(const_int 0)] 7))]
+ "DEFAULT_ABI == ABI_V4 && flag_pic == 1 && TARGET_32BIT"
+ "bl _GLOBAL_OFFSET_TABLE_@local-4"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "immediate_operand" "s"))
+ (unspec [(match_dup 1)] 7)]
+ "TARGET_ELF && flag_pic == 2"
+ "bl %1\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn "load_toc_v4_PIC_1b"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (match_operand:SI 1 "immediate_operand" "s"))
+ (unspec [(match_dup 1) (match_operand 2 "immediate_operand" "s")] 6)]
+ "TARGET_ELF && flag_pic == 2"
+ "bl %1\\n\\t.long %2-%1+4\\n%1:"
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "load_toc_v4_PIC_2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (minus:SI (match_operand:SI 2 "immediate_operand" "s")
+ (match_operand:SI 3 "immediate_operand" "s")))))]
+ "TARGET_ELF && flag_pic == 2"
+ "{l|lwz} %0,%2-%3(%1)"
+ [(set_attr "type" "load")])
+
+(define_insn "load_macho_picbase"
+ [(set (match_operand:SI 0 "register_operand" "=l")
+ (unspec:SI [(const_int 0)] 15))]
+ "(DEFAULT_ABI == ABI_DARWIN) && flag_pic"
+ "*
+{
+#if TARGET_MACHO
+ char *picbase = machopic_function_base_name ();
+ operands[1] = gen_rtx_SYMBOL_REF (Pmode, ggc_alloc_string (picbase, -1));
+#endif
+ return \"bcl 20,31,%1\\n%1:\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+;; If the TOC is shared over a translation unit, as happens with all
+;; the kinds of PIC that we support, we need to restore the TOC
+;; pointer only when jumping over units of translation.
+
+;; Re-materialize the TOC/GOT pointer at the target of a builtin setjmp,
+;; since a longjmp from another translation unit may arrive here with a
+;; stale TOC register.
+(define_expand "builtin_setjmp_receiver"
+ [(use (label_ref (match_operand 0 "" "")))]
+ "(DEFAULT_ABI == ABI_V4 && flag_pic == 1)
+ || (TARGET_TOC && TARGET_MINIMAL_TOC)"
+ "
+{
+ rs6000_emit_load_toc_table (FALSE);
+ DONE;
+}")
+
+;; A function pointer under AIX is a pointer to a data area whose first word
+;; contains the actual address of the function, whose second word contains a
+;; pointer to its TOC, and whose third word contains a value to place in the
+;; static chain register (r11). Note that if we load the static chain, our
+;; "trampoline" need not have any executable code.
+
+;; AIX indirect call, 32-bit: load the real function address from the
+;; descriptor, save our TOC at 20(r1), load the callee's TOC (word 1 of
+;; the descriptor) and static chain (word 2, into r11), call, then the
+;; final parallel restores our TOC from 20(r1).
+(define_expand "call_indirect_aix32"
+ [(set (match_dup 2)
+ (mem:SI (match_operand:SI 0 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 0)
+ (const_int 4))))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 0)
+ (const_int 8))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (scratch:SI))])]
+ "TARGET_32BIT"
+ "
+{ operands[2] = gen_reg_rtx (SImode); }")
+
+;; 64-bit variant: descriptor words are 8 bytes apart and the TOC save
+;; slot is 40(r1).
+(define_expand "call_indirect_aix64"
+ [(set (match_dup 2)
+ (mem:DI (match_operand:DI 0 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 0)
+ (const_int 8))))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 0)
+ (const_int 16))))
+ (parallel [(call (mem:SI (match_dup 2))
+ (match_operand 1 "" ""))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (scratch:SI))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = gen_reg_rtx (DImode); }")
+
+;; Value-returning version of call_indirect_aix32; operand 0 receives
+;; the function result, operand 1 is the descriptor pointer.
+(define_expand "call_value_indirect_aix32"
+ [(set (match_dup 3)
+ (mem:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (set (mem:SI (plus:SI (reg:SI 1) (const_int 20)))
+ (reg:SI 2))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (match_dup 1)
+ (const_int 4))))
+ (set (reg:SI 11)
+ (mem:SI (plus:SI (match_dup 1)
+ (const_int 8))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (scratch:SI))])]
+ "TARGET_32BIT"
+ "
+{ operands[3] = gen_reg_rtx (SImode); }")
+
+;; Value-returning version of call_indirect_aix64.
+(define_expand "call_value_indirect_aix64"
+ [(set (match_dup 3)
+ (mem:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (set (mem:DI (plus:DI (reg:DI 1) (const_int 40)))
+ (reg:DI 2))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (match_dup 1)
+ (const_int 8))))
+ (set (reg:DI 11)
+ (mem:DI (plus:DI (match_dup 1)
+ (const_int 16))))
+ (parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_dup 3))
+ (match_operand 2 "" "")))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (scratch:SI))])]
+ "TARGET_64BIT"
+ "
+{ operands[3] = gen_reg_rtx (DImode); }")
+
+;; Now the definitions for the call and call_value insns
+(define_expand "call"
+ [(parallel [(call (mem:SI (match_operand 0 "address_operand" ""))
+ (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (clobber (scratch:SI))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (flag_pic)
+ operands[0] = machopic_indirect_call_target (operands[0]);
+#endif
+
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != CONST_INT)
+ abort ();
+
+ operands[0] = XEXP (operands[0], 0);
+
+ if (GET_CODE (operands[0]) != SYMBOL_REF
+ || (INTVAL (operands[2]) & CALL_LONG) != 0)
+ {
+ if (INTVAL (operands[2]) & CALL_LONG)
+ operands[0] = rs6000_longcall_ref (operands[0]);
+
+ if (DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_AIX_NODESC
+ || DEFAULT_ABI == ABI_DARWIN)
+ operands[0] = force_reg (Pmode, operands[0]);
+
+ else if (DEFAULT_ABI == ABI_AIX)
+ {
+ /* AIX function pointers are really pointers to a three word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_indirect_aix32 (force_reg (SImode,
+ operands[0]),
+ operands[1])
+ : gen_call_indirect_aix64 (force_reg (DImode,
+ operands[0]),
+ operands[1]));
+ DONE;
+ }
+ else
+ abort ();
+ }
+}")
+
+;; Expander for a value-returning call.  Operand 0 receives the result,
+;; operand 1 is the callee (a MEM at this point), operand 2 the
+;; argument-bytes CONST_INT, operand 3 the CALL_* flag word.  Mirrors
+;; the "call" expander above with all operand indices shifted by one.
+(define_expand "call_value"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand 1 "address_operand" ""))
+ (match_operand 2 "" "")))
+ (use (match_operand 3 "" ""))
+ (clobber (scratch:SI))])]
+ ""
+ "
+{
+#if TARGET_MACHO
+ if (flag_pic)
+ operands[1] = machopic_indirect_call_target (operands[1]);
+#endif
+
+ if (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != CONST_INT)
+ abort ();
+
+ /* Strip the MEM so operands[1] is the bare call address. */
+ operands[1] = XEXP (operands[1], 0);
+
+ if (GET_CODE (operands[1]) != SYMBOL_REF
+ || (INTVAL (operands[3]) & CALL_LONG) != 0)
+ {
+ if (INTVAL (operands[3]) & CALL_LONG)
+ operands[1] = rs6000_longcall_ref (operands[1]);
+
+ if (DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_AIX_NODESC
+ || DEFAULT_ABI == ABI_DARWIN)
+ /* Force the call ADDRESS into a register, matching the "call"
+ expander.  Previously this forced operands[0] -- the value
+ return operand -- which left a non-register call address in
+ the pattern. */
+ operands[1] = force_reg (Pmode, operands[1]);
+
+ else if (DEFAULT_ABI == ABI_AIX)
+ {
+ /* AIX function pointers are really pointers to a three word
+ area. */
+ emit_call_insn (TARGET_32BIT
+ ? gen_call_value_indirect_aix32 (operands[0],
+ force_reg (SImode,
+ operands[1]),
+ operands[2])
+ : gen_call_value_indirect_aix64 (operands[0],
+ force_reg (DImode,
+ operands[1]),
+ operands[2]));
+ DONE;
+ }
+ else
+ abort ();
+ }
+}")
+
+;; Call to function in current module. No TOC pointer reload needed.
+;; Operand2 is non-zero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
+
+;; Alternative 0 ("O") is a plain call; alternative 1 ("n") must also
+;; emit a crxor/creqv to flag FP-argument state for V.4 varargs, hence
+;; the longer length.
+;; NOTE(review): unlike *call_local64 this pattern carries no
+;; TARGET_32BIT test -- presumably pattern ordering keeps the 64-bit
+;; pattern preferred on 64-bit targets; confirm before relying on it.
+(define_insn "*call_local32"
+ [(call (mem:SI (match_operand:SI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "(INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+;; 64-bit variant of *call_local32.
+(define_insn "*call_local64"
+ [(call (mem:SI (match_operand:DI 0 "current_file_function_operand" "s,s"))
+ (match_operand 1 "" "g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 3 "=l,l"))]
+ "TARGET_64BIT && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@local\" : \"bl %z0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+;; Value-returning version of *call_local32.
+(define_insn "*call_value_local32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "(INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+
+;; Value-returning version of *call_local64.
+(define_insn "*call_value_local64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "current_file_function_operand" "s,s"))
+ (match_operand 2 "" "g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n"))
+ (clobber (match_scratch:SI 4 "=l,l"))]
+ "TARGET_64BIT && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@local\" : \"bl %z1\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4,8")])
+
+;; Call to function which may be in another module. Restore the TOC
+;; pointer (r2) after the call unless this is System V.
+;; Operand2 is non-zero if we are using the V.4 calling sequence and
+;; either the function was not prototyped, or it was prototyped as a
+;; variable argument function. It is > 0 if FP registers were passed
+;; and < 0 if they were not.
+
+;; Indirect nonlocal AIX call, 32-bit: call through CTR or LR (%T0),
+;; then reload our TOC from the 20(r1) save slot set up by the
+;; call_indirect_aix32 expander.
+(define_insn "*call_indirect_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "register_operand" "cl"))
+ (match_operand 1 "" "g"))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (match_scratch:SI 2 "=l"))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T0l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+;; Direct nonlocal AIX call, 32-bit; %. emits the nop the linker may
+;; rewrite into the TOC-restore.
+(define_insn "*call_nonlocal_aix32"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+;; 64-bit variants: TOC save slot is 40(r1).
+(define_insn "*call_indirect_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "register_operand" "cl"))
+ (match_operand 1 "" "g"))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (match_scratch:SI 2 "=l"))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T0l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_nonlocal_aix64"
+ [(call (mem:SI (match_operand:DI 0 "call_operand" "s"))
+ (match_operand 1 "" "g"))
+ (use (match_operand:SI 2 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[2]) & CALL_LONG) == 0"
+ "bl %z0\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+;; Value-returning versions of the four patterns above.
+(define_insn "*call_value_indirect_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "register_operand" "cl"))
+ (match_operand 2 "" "g")))
+ (use (reg:SI 2))
+ (use (reg:SI 11))
+ (set (reg:SI 2)
+ (mem:SI (plus:SI (reg:SI 1) (const_int 20))))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_32BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T1l\;{l|lwz} 2,20(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix32"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 4 "=l"))]
+ "TARGET_32BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_indirect_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "register_operand" "cl"))
+ (match_operand 2 "" "g")))
+ (use (reg:DI 2))
+ (use (reg:DI 11))
+ (set (reg:DI 2)
+ (mem:DI (plus:DI (reg:DI 1) (const_int 40))))
+ (clobber (match_scratch:SI 3 "=l"))]
+ "TARGET_64BIT && DEFAULT_ABI == ABI_AIX"
+ "b%T1l\;ld 2,40(1)"
+ [(set_attr "type" "jmpreg")
+ (set_attr "length" "8")])
+
+(define_insn "*call_value_nonlocal_aix64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand" "s"))
+ (match_operand 2 "" "g")))
+ (use (match_operand:SI 3 "immediate_operand" "O"))
+ (clobber (match_scratch:SI 4 "=l"))]
+ "TARGET_64BIT
+ && DEFAULT_ABI == ABI_AIX
+ && (INTVAL (operands[3]) & CALL_LONG) == 0"
+ "bl %z1\;%."
+ [(set_attr "type" "branch")
+ (set_attr "length" "8")])
+
+;; A function pointer under System V is just a normal pointer
+;; operands[0] is the function pointer
+;; operands[1] is the stack size to clean up
+;; operands[2] is the value FUNCTION_ARG returns for the VOID argument
+;; which indicates how to set cr1
+
+;; SysV/Darwin nonlocal call.  Alternatives 0/1 are register-indirect
+;; (CTR or LR via %T0), 2/3 are direct; the "n" alternatives also emit
+;; crxor/creqv to record FP-argument state in cr1 bit 6 for varargs.
+(define_insn "*call_nonlocal_sysv"
+ [(call (mem:SI (match_operand:SI 0 "call_operand" "cl,cl,s,s"))
+ (match_operand 1 "" "g,g,g,g"))
+ (use (match_operand:SI 2 "immediate_operand" "O,n,O,n"))
+ (clobber (match_scratch:SI 3 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_AIX_NODESC
+ || DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ if (INTVAL (operands[2]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[2]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ case 1:
+ return \"b%T0l\";
+ case 2:
+ case 3:
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z0@plt\" : \"bl %z0\";
+ }
+}"
+ [(set_attr "type" "jmpreg,jmpreg,branch,branch")
+ (set_attr "length" "4,8,4,8")])
+
+;; Value-returning version of *call_nonlocal_sysv.
+(define_insn "*call_value_nonlocal_sysv"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand" "cl,cl,s,s"))
+ (match_operand 2 "" "g,g,g,g")))
+ (use (match_operand:SI 3 "immediate_operand" "O,n,O,n"))
+ (clobber (match_scratch:SI 4 "=l,l,l,l"))]
+ "DEFAULT_ABI == ABI_AIX_NODESC
+ || DEFAULT_ABI == ABI_V4
+ || DEFAULT_ABI == ABI_DARWIN"
+ "*
+{
+ if (INTVAL (operands[3]) & CALL_V4_SET_FP_ARGS)
+ output_asm_insn (\"crxor 6,6,6\", operands);
+
+ else if (INTVAL (operands[3]) & CALL_V4_CLEAR_FP_ARGS)
+ output_asm_insn (\"creqv 6,6,6\", operands);
+
+ switch (which_alternative)
+ {
+ default:
+ abort ();
+ case 0:
+ case 1:
+ return \"b%T1l\";
+ case 2:
+ case 3:
+ return (DEFAULT_ABI == ABI_V4 && flag_pic) ? \"bl %z1@plt\" : \"bl %z1\";
+ }
+}"
+ [(set_attr "type" "jmpreg,jmpreg,branch,branch")
+ (set_attr "length" "4,8,4,8")])
+
+;; Call subroutine returning any type.
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, const0_rtx, const0_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "")
+
+;; Compare insns are next. Note that the RS/6000 has two types of compares,
+;; signed & unsigned, and one type of branch.
+;;
+;; Start with the DEFINE_EXPANDs to generate the rtl for compares, scc
+;; insns, and branches. We store the operands of compares until we see
+;; how it is used.
+;; Compare expanders: stash the operands in rs6000_compare_op0/op1 and
+;; record whether this is an FP compare; the following branch/scc
+;; expander emits the real compare.
+(define_expand "cmpsi"
+ [(set (cc0)
+ (compare (match_operand:SI 0 "gpc_reg_operand" "")
+ (match_operand:SI 1 "reg_or_short_operand" "")))]
+ ""
+ "
+{
+ /* Take care of the possibility that operands[1] might be negative but
+ this might be a logical operation. That insn doesn't exist. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ operands[1] = force_reg (SImode, operands[1]);
+
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "cmpdi"
+ [(set (cc0)
+ (compare (match_operand:DI 0 "gpc_reg_operand" "")
+ (match_operand:DI 1 "reg_or_short_operand" "")))]
+ "TARGET_POWERPC64"
+ "
+{
+ /* Take care of the possibility that operands[1] might be negative but
+ this might be a logical operation. That insn doesn't exist. */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 0)
+ operands[1] = force_reg (DImode, operands[1]);
+
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 0;
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ [(set (cc0) (compare (match_operand:SF 0 "gpc_reg_operand" "")
+ (match_operand:SF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 1;
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ [(set (cc0) (compare (match_operand:DF 0 "gpc_reg_operand" "")
+ (match_operand:DF 1 "gpc_reg_operand" "")))]
+ "TARGET_HARD_FLOAT"
+ "
+{
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 1;
+ DONE;
+}")
+
+;; 128-bit long double compare (AIX only).
+(define_expand "cmptf"
+ [(set (cc0) (compare (match_operand:TF 0 "gpc_reg_operand" "")
+ (match_operand:TF 1 "gpc_reg_operand" "")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "
+{
+ rs6000_compare_op0 = operands[0];
+ rs6000_compare_op1 = operands[1];
+ rs6000_compare_fp_p = 1;
+ DONE;
+}")
+
+;; Conditional-branch expanders: each simply hands the comparison code
+;; and the target label to rs6000_emit_cbranch, which consumes the
+;; operands stashed by the cmp* expanders above.
+(define_expand "beq"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (EQ, operands[0]); DONE; }")
+
+(define_expand "bne"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (NE, operands[0]); DONE; }")
+
+(define_expand "bge"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GE, operands[0]); DONE; }")
+
+(define_expand "bgt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GT, operands[0]); DONE; }")
+
+(define_expand "ble"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LE, operands[0]); DONE; }")
+
+(define_expand "blt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LT, operands[0]); DONE; }")
+
+(define_expand "bgeu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GEU, operands[0]); DONE; }")
+
+(define_expand "bgtu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (GTU, operands[0]); DONE; }")
+
+(define_expand "bleu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LEU, operands[0]); DONE; }")
+
+(define_expand "bltu"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LTU, operands[0]); DONE; }")
+
+;; IEEE unordered-aware branches.
+(define_expand "bunordered"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNORDERED, operands[0]); DONE; }")
+
+(define_expand "bordered"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (ORDERED, operands[0]); DONE; }")
+
+(define_expand "buneq"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNEQ, operands[0]); DONE; }")
+
+(define_expand "bunge"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNGE, operands[0]); DONE; }")
+
+(define_expand "bungt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNGT, operands[0]); DONE; }")
+
+(define_expand "bunle"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNLE, operands[0]); DONE; }")
+
+(define_expand "bunlt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (UNLT, operands[0]); DONE; }")
+
+(define_expand "bltgt"
+ [(use (match_operand 0 "" ""))]
+ ""
+ "{ rs6000_emit_cbranch (LTGT, operands[0]); DONE; }")
+
+;; For SNE, we would prefer that the xor/abs sequence be used for integers.
+;; For SEQ, likewise, except that comparisons with zero should be done
+;; with an scc insn. However, due to the order in which combine sees the
+;; resulting insns, we must, in fact, allow SEQ for integers. Fail in
+;; the cases we don't want to handle.
+;; Store-condition (scc) expanders; FAIL where the generic portable
+;; sequence is preferable (see comment above).
+(define_expand "seq"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (EQ, operands[0]); DONE; }")
+
+(define_expand "sne"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p)
+ FAIL;
+
+ rs6000_emit_sCOND (NE, operands[0]);
+ DONE;
+}")
+
+;; A > 0 is best done using the portable sequence, so fail in that case.
+(define_expand "sgt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p
+ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ FAIL;
+
+ rs6000_emit_sCOND (GT, operands[0]);
+ DONE;
+}")
+
+;; A < 0 is best done in the portable way for A an integer.
+(define_expand "slt"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p
+ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ FAIL;
+
+ rs6000_emit_sCOND (LT, operands[0]);
+ DONE;
+}")
+
+;; A >= 0 is best done the portable way for A an integer.
+(define_expand "sge"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p
+ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ FAIL;
+
+ rs6000_emit_sCOND (GE, operands[0]);
+ DONE;
+}")
+
+;; A <= 0 is best done the portable way for A an integer.
+(define_expand "sle"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "
+{
+ if (! rs6000_compare_fp_p
+ && (! TARGET_POWER || rs6000_compare_op1 == const0_rtx))
+ FAIL;
+
+ rs6000_emit_sCOND (LE, operands[0]);
+ DONE;
+}")
+
+;; Unsigned scc expanders never fail.
+(define_expand "sgtu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GTU, operands[0]); DONE; }")
+
+(define_expand "sltu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LTU, operands[0]); DONE; }")
+
+(define_expand "sgeu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (GEU, operands[0]); DONE; }")
+
+(define_expand "sleu"
+ [(clobber (match_operand:SI 0 "gpc_reg_operand" ""))]
+ ""
+ "{ rs6000_emit_sCOND (LEU, operands[0]); DONE; }")
+
+;; Here are the actual compare insns.
+;; Signed register/short-immediate compare, 32-bit.
+(define_insn "*cmpsi_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ ""
+ "{cmp%I2|cmpw%I2} %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+;; Signed register/short-immediate compare, 64-bit.
+(define_insn "*cmpdi_internal1"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWERPC64"
+ "cmpd%I2 %0,%1,%2"
+ [(set_attr "type" "compare")])
+
+;; If we are comparing a register for equality with a large constant,
+;; we can do this with an XOR followed by a compare. But we need a scratch
+;; register for the result of the XOR.
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "non_short_cint_operand" "")))
+ (clobber (match_operand:SI 3 "gpc_reg_operand" ""))]
+ "find_single_use (operands[0], insn, 0)
+ && (GET_CODE (*find_single_use (operands[0], insn, 0)) == EQ
+ || GET_CODE (*find_single_use (operands[0], insn, 0)) == NE)"
+ [(set (match_dup 3) (xor:SI (match_dup 1) (match_dup 4)))
+ (set (match_dup 0) (compare:CC (match_dup 3) (match_dup 5)))]
+ "
+{
+ /* Get the constant we are comparing against, C, and see what it looks like
+ sign-extended to 16 bits. Then see what constant could be XOR'ed
+ with C to get the sign-extended value. */
+
+ HOST_WIDE_INT c = INTVAL (operands[2]);
+ HOST_WIDE_INT sextc = (c & 0x7fff) - (c & 0x8000);
+ HOST_WIDE_INT xorv = c ^ sextc;
+
+ operands[4] = GEN_INT (xorv);
+ operands[5] = GEN_INT (sextc);
+}")
+
+;; Unsigned register/unsigned-short-immediate compare, 32-bit.
+(define_insn "*cmpsi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_u_short_operand" "rK")))]
+ ""
+ "{cmpl%I2|cmplw%I2} %0,%1,%b2"
+ [(set_attr "type" "compare")])
+
+;; Unsigned register/unsigned-short-immediate compare, 64-bit.
+;; cmpld is a 64-bit-only instruction, so gate this pattern on
+;; TARGET_POWERPC64 like *cmpdi_internal1 (the condition was previously
+;; empty).
+(define_insn "*cmpdi_internal2"
+ [(set (match_operand:CCUNS 0 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_u_short_operand" "rK")))]
+ "TARGET_POWERPC64"
+ "cmpld%I2 %0,%1,%b2"
+ [(set_attr "type" "compare")])
+
+;; The following two insns don't exist as single insns, but if we provide
+;; them, we can swap an add and compare, which will enable us to overlap more
+;; of the required delay between a compare and branch. We generate code for
+;; them by splitting.
+
+;; Fused compare(r1,c2) + r0 = r1 + c4, signed form; exists only so the
+;; scheduler can swap an add with a compare, and is always split.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=y")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+;; Unsigned form of the pattern above.
+(define_insn ""
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "=y")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "u_short_cint_operand" "i")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "i")))]
+ ""
+ "#"
+ [(set_attr "length" "8")])
+
+;; Splitters that break the fused patterns back into compare then add.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_operand" "")
+ (compare:CC (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CC (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+(define_split
+ [(set (match_operand:CCUNS 3 "cc_reg_operand" "")
+ (compare:CCUNS (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "u_short_cint_operand" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1) (match_operand:SI 4 "short_cint_operand" "")))]
+ ""
+ [(set (match_dup 3) (compare:CCUNS (match_dup 1) (match_dup 2)))
+ (set (match_dup 0) (plus:SI (match_dup 1) (match_dup 4)))])
+
+;; Floating-point compares; fcmpu sets the CCFP field directly.
+(define_insn "*cmpsf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:SF 1 "gpc_reg_operand" "f")
+ (match_operand:SF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+(define_insn "*cmpdf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:DF 1 "gpc_reg_operand" "f")
+ (match_operand:DF 2 "gpc_reg_operand" "f")))]
+ "TARGET_HARD_FLOAT"
+ "fcmpu %0,%1,%2"
+ [(set_attr "type" "fpcompare")])
+
+;; Only need to compare second words if first words equal
+(define_insn "*cmptf_internal1"
+ [(set (match_operand:CCFP 0 "cc_reg_operand" "=y")
+ (compare:CCFP (match_operand:TF 1 "gpc_reg_operand" "f")
+ (match_operand:TF 2 "gpc_reg_operand" "f")))]
+ "DEFAULT_ABI == ABI_AIX && TARGET_HARD_FLOAT && TARGET_LONG_DOUBLE_128"
+ "fcmpu %0,%1,%2\;bne %0,$+4\;fcmpu %0,%L1,%L2"
+ [(set_attr "type" "fpcompare")
+ (set_attr "length" "12")])
+
+;; Now we have the scc insns. We can do some combinations because of the
+;; way the machine works.
+;;
+;; Note that this is probably faster if we can put an insn between the
+;; mfcr and rlinm, but this is tricky. Let's leave it for now. In most
+;; cases the insns below which don't use an intermediate CR field will
+;; be used instead.
+;; scc via mfcr: copy the whole CR to a GPR, then rotate the relevant
+;; CR bit into position 31.  %D1 emits any needed CR-field setup, %J1
+;; the rotate count for the tested bit.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ ""
+ "%D1mfcr %0\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set_attr "length" "12")])
+
+;; DImode destination variant for 64-bit targets.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64"
+ "%D1mfcr %0\;{rlinm|rlwinm} %0,%0,%J1,1"
+ [(set_attr "length" "12")])
+
+;; scc whose result is itself compared against zero; only the cr0 ("x")
+;; alternative can use the record form, the other is forced to split.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r,r")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "! TARGET_POWERPC64"
+ "@
+ %D1mfcr %3\;{rlinm.|rlwinm.} %3,%3,%J1,1
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Split the non-cr0 alternative into scc followed by a separate compare.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (const_int 0)))
+ (set (match_operand:SI 3 "gpc_reg_operand" "")
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (match_op_dup 1 [(match_dup 2) (const_int 0)]))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; scc shifted left by a constant: one rlwinm both extracts the CR bit
+;; and places it at the requested position.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n")))]
+ ""
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[4] = GEN_INT (count);
+ operands[5] = GEN_INT (put_bit);
+
+ return \"%D1mfcr %0\;{rlinm|rlwinm} %0,%0,%4,%5,%5\";
+}"
+ [(set_attr "length" "12")])
+
+;; Shifted scc whose result is also compared against zero.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y,y")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" "n,n"))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "=r,r")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ int is_bit = ccr_bit (operands[1], 1);
+ int put_bit = 31 - (INTVAL (operands[3]) & 31);
+ int count;
+
+ /* Force split for non-cc0 compare. */
+ if (which_alternative == 1)
+ return \"#\";
+
+ if (is_bit >= put_bit)
+ count = is_bit - put_bit;
+ else
+ count = 32 - (put_bit - is_bit);
+
+ operands[5] = GEN_INT (count);
+ operands[6] = GEN_INT (put_bit);
+
+ return \"%D1mfcr %4\;{rlinm.|rlwinm.} %4,%4,%5,%6,%6\";
+}"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Split for the forced-split alternative of the pattern above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ashift:SI (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "")
+ (const_int 0)])
+ (match_operand:SI 3 "const_int_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 4 "gpc_reg_operand" "")
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (ashift:SI (match_op_dup 1 [(match_dup 2) (const_int 0)])
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; There is a 3 cycle delay between consecutive mfcr instructions
+;; so it is useful to combine 2 scc instructions to use only one mfcr.
+
+;; SImode peephole: two adjacent scc insns reading different CR fields
+;; share a single mfcr, followed by one rlwinm extraction per result.
+(define_peephole
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (match_operator:SI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:SI 3 "gpc_reg_operand" "=r")
+ (match_operator:SI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "REGNO (operands[2]) != REGNO (operands[5])"
+ "%D1%D4mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "length" "20")])
+
+;; DImode variant of the combined-mfcr peephole (TARGET_POWERPC64 only).
+(define_peephole
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (match_operator:DI 1 "scc_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)]))
+ (set (match_operand:DI 3 "gpc_reg_operand" "=r")
+ (match_operator:DI 4 "scc_comparison_operator"
+ [(match_operand 5 "cc_reg_operand" "y")
+ (const_int 0)]))]
+ "TARGET_POWERPC64 && REGNO (operands[2]) != REGNO (operands[5])"
+ "%D1%D4mfcr %3\;{rlinm|rlwinm} %0,%3,%J1,1\;{rlinm|rlwinm} %3,%3,%J4,1"
+ [(set_attr "length" "20")])
+
+;; There are some scc insns that can be done directly, without a compare.
+;; These are faster because they don't involve the communications between
+;; the FXU and branch units. In fact, we will be replacing all of the
+;; integer scc insns here or in the portable methods in emit_store_flag.
+;;
+;; Also support (neg (scc ..)) since that construct is used to replace
+;; branches, (plus (scc ..) ..) since that construct is common and
+;; takes no more insns than scc, and (and (neg (scc ..)) ..) in the
+;; cases where it is no more expensive than (neg (scc ..)).
+
+;; Have reload force a constant into a register for the simple insns that
+;; otherwise won't accept constants. We do this because it is faster than
+;; the cmp/mfcr sequence we would otherwise generate.
+
+;; (eq:SI x y) without a CR compare: xor/xori/xoris/subfic reduces the
+;; comparison to "is the intermediate zero", then subfic+adde converts
+;; that into 0/1 via the carry chain.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I")))
+ (clobber (match_scratch:SI 3 "=r,&r,r,r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %3,%1,0\;{ae|adde} %0,%3,%1
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae|adde} %0,%3,%0"
+ [(set_attr "length" "12,8,12,12,12")])
+
+;; DImode variant of the direct eq pattern (TARGET_POWERPC64 only).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (eq:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,O,K,J,I")))
+ (clobber (match_scratch:DI 3 "=r,&r,r,r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ xor %0,%1,%2\;subfic %3,%0,0\;adde %0,%3,%0
+ subfic %3,%1,0\;adde %0,%3,%1
+ xori %0,%1,%b2\;subfic %3,%0,0\;adde %0,%3,%0
+ xoris %0,%1,%u2\;subfic %3,%0,0\;adde %0,%3,%0
+ subfic %0,%1,%2\;subfic %3,%0,0\;adde %0,%3,%0"
+ [(set_attr "length" "12,8,12,12,12")])
+
+;; eq:SI that also sets a CR field from the result (adde.).  The ?y
+;; (non-CR0) alternatives emit "#" and rely on the split below.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
+ (eq:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,&r,r,r,r,r,&r,r,r,r"))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
+ {sfi|subfic} %3,%1,0\;{ae.|adde.} %0,%3,%1
+ {xoril|xori} %0,%1,%b2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
+ {xoriu|xoris} %0,%1,%u2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
+ {sfi|subfic} %0,%1,%2\;{sfi|subfic} %3,%0,0\;{ae.|adde.} %0,%3,%0
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+;; Post-reload split for the non-CR0 eq:SI alternatives: compute the scc
+;; value first, then compare it with zero separately.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (eq:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (eq:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; eq:DI with CR set, analogous to the SImode pattern above.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (eq:DI (match_operand:DI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:DI 2 "reg_or_cint_operand" "r,O,K,J,I,r,O,K,J,I"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
+ (eq:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:DI 3 "=r,&r,r,r,r,r,&r,r,r,r"))]
+ "TARGET_POWERPC64"
+ "@
+ xor %0,%1,%2\;subfic %3,%0,0\;adde. %0,%3,%0
+ subfic %3,%1,0\;adde. %0,%3,%1
+ xori %0,%1,%b2\;subfic %3,%0,0\;adde. %0,%3,%0
+ xoris %0,%1,%u2\;subfic %3,%0,0\;adde. %0,%3,%0
+ subfic %0,%1,%2\;subfic %3,%0,0\;adde. %0,%3,%0
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+;; Post-reload split for the non-CR0 eq:DI alternatives.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (eq:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_cint_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (eq:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (eq:DI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; We have insns of the form shown by the first define_insn below. If
+;; there is something inside the comparison operation, we must split it.
+;; Split (plus (cmp X Y) Z) when X is not a plain GPR: copy X into the
+;; clobbered scratch first so the insn patterns below can match.
+(define_split
+ [(set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_operator 1 "comparison_operator"
+ [(match_operand:SI 2 "" "")
+ (match_operand:SI 3
+ "reg_or_cint_operand" "")])
+ (match_operand:SI 4 "gpc_reg_operand" "")))
+ (clobber (match_operand:SI 5 "register_operand" ""))]
+ "! gpc_reg_operand (operands[2], SImode)"
+ [(set (match_dup 5) (match_dup 2))
+ (set (match_dup 2) (plus:SI (match_op_dup 1 [(match_dup 2) (match_dup 3)])
+ (match_dup 4)))])
+
+;; (plus (eq x y) z) via the carry: fold x and y to a single value that is
+;; zero iff x==y, subfic sets carry iff that value was zero, and addze
+;; adds the carry to z.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (plus:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r")))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
+ {sfi|subfic} %4,%1,0\;{aze|addze} %0,%3
+ {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
+ {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
+ {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12,8,12,12,12")])
+
+;; Compare-only form: CR field set from (plus (eq ..) ..), value discarded.
+;; Non-CR0 alternatives emit "#" for the split below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,0\;{aze.|addze.} %4,%3
+ {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+;; Post-reload split for the compare-only form: compute into the scratch,
+;; then compare the scratch with zero.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (eq:SI (match_dup 1)
+ (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set form: same computation, but the sum is also kept in a GPR.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,x,x,x,?y,?y,?y,?y,?y")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r,r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I,r,O,K,L,I"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r,r,r,r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r,r,r,r,r,r")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r,&r,&r,&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %4,%1,0\;{aze.|addze.} %0,%3
+ {xoril|xori} %4,%1,%b2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ {xoriu|xoris} %4,%1,%u2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ {sfi|subfic} %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ #
+ #
+ #
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,8,12,12,12,16,12,16,16,16")])
+
+;; Post-reload split for the compare-and-set form above.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI
+ (eq:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_cint_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (eq:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (neg (eq x y)): fold x and y, then addic/subfe yields -1 when the
+;; folded value is zero (x==y) and 0 otherwise.
(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r,r")
+ (neg:SI (eq:SI (match_operand:SI 1 "gpc_reg_operand" "%r,r,r,r,r")
+ (match_operand:SI 2 "reg_or_cint_operand" "r,O,K,L,I"))))]
+ "! TARGET_POWERPC64"
+ "@
+ xor %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,-1\;{sfe|subfe} %0,%0,%0
+ {xoril|xori} %0,%1,%b2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {xoriu|xoris} %0,%1,%u2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {sfi|subfic} %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "12,8,12,12,12")])
+
+;; Simplify (ne X (const_int 0)) on the PowerPC. No need to on the Power,
+;; since it nabs/sr is just as fast.
+;; (ne x 0) as 0/1 via the carry: addic sets carry iff x != 0,
+;; subfe then produces the carry bit.
+(define_insn "*ne0"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=&r")
+ (lshiftrt:SI (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31)))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "! TARGET_POWER && ! TARGET_POWERPC64"
+ "{ai|addic} %2,%1,-1\;{sfe|subfe} %0,%2,%1"
+ [(set_attr "length" "8")])
+
+;; DImode variant of *ne0 (shift by 63 instead of 31).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (lshiftrt:DI (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63)))
+ (clobber (match_scratch:DI 2 "=&r"))]
+ "TARGET_POWERPC64"
+ "addic %2,%1,-1\;subfe %0,%2,%1"
+ [(set_attr "length" "8")])
+
+;; This is what (plus (ne X (const_int 0)) Y) looks like.
+;; (plus (ne x 0) y): addic sets carry iff x != 0, addze adds it to y.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "! TARGET_POWERPC64"
+ "{ai|addic} %3,%1,-1\;{aze|addze} %0,%2"
+ [(set_attr "length" "8")])
+
+;; DImode variant of (plus (ne x 0) y).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ "TARGET_POWERPC64"
+ "addic %3,%1,-1\;addze %0,%2"
+ [(set_attr "length" "8")])
+
+;; Compare-only (plus (ne x 0) y): result goes only to a CR field.
+;; The ?y alternative emits "#" for the post-reload split below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split for the non-CR0 compare-only form above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1)))
+ (const_int 31))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; DImode compare-only variant.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addic %3,%1,-1\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; DImode post-reload split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1)))
+ (const_int 63))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set (plus (ne x 0) y): sum kept in a GPR as well.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {ai|addic} %3,%1,-1\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split for the non-CR0 compare-and-set form above.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lshiftrt:SI
+ (neg:SI (abs:SI (match_operand:SI 1 "gpc_reg_operand" "")))
+ (const_int 31))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (lshiftrt:SI (neg:SI (abs:SI (match_dup 1))) (const_int 31))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; DImode compare-and-set variant.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addic %3,%1,-1\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; DImode post-reload split for the compare-and-set form.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (lshiftrt:DI
+ (neg:DI (abs:DI (match_operand:DI 1 "gpc_reg_operand" "")))
+ (const_int 63))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:DI (lshiftrt:DI (neg:DI (abs:DI (match_dup 1))) (const_int 63))
+ (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Signed (le x y) patterns, POWER only: the doz ("difference or zero")
+;; instruction makes the 0/1 result cheap without a CR round trip.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O")))
+ (clobber (match_scratch:SI 3 "=r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; le:SI with CR set; non-CR0 alternatives emit "#" for the split below.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,X,r,X"))]
+ "TARGET_POWER"
+ "@
+ doz %3,%2,%1\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{sri.|srwi.} %0,%0,31
+ #
+ #"
+ [(set_attr "type" "compare,delayed_compare,compare,delayed_compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Post-reload split for the non-CR0 le:SI alternatives.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (le:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (plus (le x y) z), POWER only, via doz/subfic/addze carry arithmetic.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3
+ {srai|srawi} %4,%1,31\;{sf|subfc} %4,%1,%4\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+;; Compare-only (plus (le x y) z): result goes only to a CR field.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ {srai|srawi} %4,%1,31\;{sf|subfc} %4,%1,%4\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Post-reload split for the non-CR0 compare-only form above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set (plus (le x y) z): sum kept in a GPR as well.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O,r,O"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ {srai|srawi} %4,%1,31\;{sf|subfc} %4,%1,%4\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Post-reload split for the compare-and-set form above.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (le:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (neg (le x y)), POWER only: 0/-1 result via doz/addic/subfe.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (le:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,O"))))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,-1\;{aze|addze} %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; Unsigned (leu x y) as 0/1: subfc sets carry iff y >= x (unsigned),
+;; li zeroes the target, adde adds the carry in.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; DImode variant of the direct leu pattern.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (leu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWERPC64"
+ "subf%I2c %0,%1,%2\;li %0,0\;adde %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; leu:DI with CR set; the ?y alternative emits "#" for the split below.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (leu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (leu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ subf%I2c %0,%1,%2\;li %0,0\;adde. %0,%0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the non-CR0 leu:DI alternative.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (leu:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (leu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (leu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; leu:SI with CR set, analogous to the DImode pattern above.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (leu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the non-CR0 leu:SI alternative.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (leu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (leu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; NOTE(review): this pattern appears byte-identical to the leu:DI
+;; compare insn a few patterns above — presumably redundant; confirm
+;; against the generated recognizer before removing.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (leu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (leu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ subf%I2c %0,%1,%2\;li %0,0\;adde. %0,%0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; (plus (leu x y) z): subfc into a scratch sets the carry, addze adds it
+;; to z.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %4,%1,%2\;{aze|addze} %0,%3"
+ [(set_attr "length" "8")])
+
+;; Compare-only (plus (leu x y) z); ?y alternative splits below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split for the non-CR0 compare-only form above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set (plus (leu x y) z): sum kept in a GPR as well.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12")])
+
+;; Post-reload split for the compare-and-set form above.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (leu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (neg (leu x y)): subfc/subfe yields 0 or -1 after the final nand
+;; complements the all-ones/all-zeros mask.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; (and (neg (leu x y)) z): build the mask in the scratch, then andc
+;; combines it with z.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;andc %0,%3,%4"
+ [(set_attr "length" "12")])
+
+;; Compare-only form of the masked leu; ?y alternative splits below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the non-CR0 compare-only form above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (leu:SI (match_dup 1)
+ (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set form of the masked leu.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;andc. %0,%3,%4
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the compare-and-set form above.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (leu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (neg:SI (leu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; Signed (lt x y), POWER only: doz%I2 then nabs forces the sign bit,
+;; srwi extracts it as 0/1.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; lt:SI with CR set (srwi.); ?y alternative splits below.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %0,%1,%2\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the non-CR0 lt:SI alternative.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (lt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (lt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (plus (lt x y) z), POWER only, via doz/addic/addze carry arithmetic.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "TARGET_POWER"
+ "doz%I2 %4,%1,%2\;{ai|addic} %4,%4,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+;; Compare-only (plus (lt x y) z); ?y alternative splits below.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Post-reload split for the non-CR0 compare-only form above.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Compare-and-set (plus (lt x y) z): sum kept in a GPR as well.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{ai|addic} %4,%4,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (lt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (lt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; scc patterns for unsigned ltu:SI, 32-bit only (! TARGET_POWERPC64).
+;; These use the carry-chain idiom: subfc/addic sets CA from the compare,
+;; then subfe rS,rS,rS broadcasts CA-1 into a 0/-1 mask.  The second
+;; alternative (constraint "P": negatable 16-bit immediate) folds the
+;; comparison constant into addic via %n2 (the negated operand).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;neg %0,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;neg %0,%0"
+ [(set_attr "length" "12")])
+
+;; ltu:SI recording form; cr0 ("x") alternatives emit code, "?y" splits.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (ltu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Post-reload split for the non-cr0 alternatives: value, then compare.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ltu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (ltu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = (op1 <u op2) + op3; the 0/-1 mask is subtracted back from op3.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:SI 3 "reg_or_short_operand" "rI,rI")))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3"
+ [(set_attr "length" "12")])
+
+;; Compare-only form of plus(ltu); result lives only in the scratch.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Non-cr0 split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (ltu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; plus(ltu) keeping the sum in op0 and recording it.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %0,%4,%3
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %0,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Its non-cr0 split: parallel (value + clobber), then explicit compare.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (ltu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = -(op1 <u op2): the subfe mask IS the negated result (2 insns).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (ltu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "8")])
+
+;; scc patterns for signed ge:SI, TARGET_POWER only (doz-based, like the
+;; lt:SI group above but with the operands' roles flipped via subfic 0 /
+;; adde to produce ge instead of lt).  Same three-way structure: value
+;; form, recording form, and a post-reload split for non-cr0 CC targets.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))
+ (clobber (match_scratch:SI 3 "=r"))]
+ "TARGET_POWER"
+ "doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae|adde} %0,%0,%3"
+ [(set_attr "length" "12")])
+
+;; ge:SI recording form.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=r,r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %3,%1,%2\;{sfi|subfic} %0,%3,0\;{ae.|adde.} %0,%0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split: parallel (value + scratch clobber), then compare op0.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (ge:SI (match_dup 1) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = (op1 >= op2) + op3.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "TARGET_POWER"
+ "doz%I2 %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+;; Compare-only form of plus(ge).
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; plus(ge) keeping the sum in op0 and recording it.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz%I2 %4,%1,%2\;{sfi|subfic} %4,%4,0\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Its non-cr0 split.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (ge:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = -(op1 >= op2) as a 0/-1 mask.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (ge:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWER"
+ "doz%I2 %0,%1,%2\;{ai|addic} %0,%0,-1\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; scc patterns for unsigned geu, in parallel 32-bit (! TARGET_POWERPC64,
+;; SI) and 64-bit (TARGET_POWERPC64, DI) versions.  The carry from
+;; subfc/addic is captured into a 0/1 result via li 0 + adde.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae|adde} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; 64-bit twin of the geu value pattern.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (geu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_neg_short_operand" "r,P")))]
+ "TARGET_POWERPC64"
+ "@
+ subfc %0,%2,%1\;li %0,0\;adde %0,%0,%0
+ addic %0,%1,%n2\;li %0,0\;adde %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; geu:SI recording form; "?y" alternatives are split after reload.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (geu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ {ai|addic} %0,%1,%n2\;{cal %0,0(0)|li %0,0}\;{ae.|adde.} %0,%0,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Non-cr0 split for geu:SI recording form.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (geu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (geu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; geu:DI recording form (64-bit twin).
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (geu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (geu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ subfc %0,%2,%1\;li %0,0\;adde. %0,%0,%0
+ addic %0,%1,%n2\;li %0,0\;adde. %0,%0,%0
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Non-cr0 split for geu:DI recording form.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (geu:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_neg_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (geu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (geu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; plus(geu:SI) patterns, 32-bit only: the compare leaves its result in CA,
+;; which addze folds directly into op3 — only 2 instructions in the fast
+;; alternatives (length 8).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{aze|addze} %0,%3
+ {ai|addic} %4,%1,%n2\;{aze|addze} %0,%3"
+ [(set_attr "length" "8")])
+
+;; Compare-only form of plus(geu).
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{aze.|addze.} %4,%3
+ {ai|addic} %4,%1,%n2\;{aze.|addze.} %4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+;; Non-cr0 split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; plus(geu) keeping the sum in op0 and recording it.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{aze.|addze.} %0,%3
+ {ai|addic} %4,%1,%n2\;{aze.|addze.} %0,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,8,12,12")])
+
+;; Its non-cr0 split.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (geu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = -(op1 >=u op2) as a 0/-1 mask; note the second alternative takes
+;; "I" (positive short immediate) rather than "P".
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (neg:SI (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,I"))))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %0,%2,%1\;{sfe|subfe} %0,%0,%0\;nand %0,%0,%0
+ {sfi|subfic} %0,%1,-1\;{a%I2|add%I2c} %0,%0,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "12")])
+
+;; and(neg(geu:SI), reg) patterns, 32-bit only: op3 masked by the geu
+;; condition.  The subfe scratch holds -(op1 <u op2), so andc (AND with
+;; complement) yields op3 when op1 >=u op2 and 0 otherwise.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r")))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;andc %0,%3,%4
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;andc %0,%3,%4"
+ [(set_attr "length" "12")])
+
+;; Compare-only form; result lives only in the scratch.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;andc. %4,%3,%4
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Non-cr0 split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (and:SI (neg:SI (geu:SI (match_dup 1)
+ (match_dup 2)))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Masked value kept in op0 AND recorded in a CC register.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "r,P,r,P")))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf|subfc} %4,%2,%1\;{sfe|subfe} %4,%4,%4\;andc. %0,%3,%4
+ {ai|addic} %4,%1,%n2\;{sfe|subfe} %4,%4,%4\;andc. %0,%3,%4
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,12,16,16")])
+
+;; Its non-cr0 split: parallel (value + clobber), then explicit compare.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (and:SI (neg:SI
+ (geu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_neg_short_operand" "")))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (and:SI (neg:SI (geu:SI (match_dup 1) (match_dup 2))) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; gt-against-zero scc patterns: subfic 0 / addme / shift-sign-bit-down
+;; produces (op1 > 0) without a real compare.  SI (32-bit) and DI (64-bit,
+;; srdi by 63) versions, each with a recording form and non-cr0 split.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (const_int 0)))]
+ "! TARGET_POWERPC64"
+ "{sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; 64-bit twin of (op1 > 0).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (const_int 0)))]
+ "TARGET_POWERPC64"
+ "subfic %0,%1,0\;addme %0,%0\;srdi %0,%0,63"
+ [(set_attr "length" "12")])
+
+;; (op1 > 0):SI recording form.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_POWERPC64"
+ "@
+ {sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for the SI recording form.
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (gt:SI (match_dup 1) (const_int 0)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; (op1 > 0):DI recording form.
+(define_insn ""
+ [(set (match_operand:CC 2 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (gt:DI (match_dup 1) (const_int 0)))]
+ "TARGET_POWERPC64"
+ "@
+ subfic %0,%1,0\;addme %0,%0\;srdi. %0,%0,63
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for the DI recording form.
+(define_split
+ [(set (match_operand:CC 2 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (gt:DI (match_dup 1) (const_int 0)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (gt:DI (match_dup 1) (const_int 0)))
+ (set (match_dup 2)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; General gt:SI scc patterns, TARGET_POWER only: doz with operands swapped
+;; relative to the lt patterns (doz %0,%2,%1) computes op1 > op2.  Note op2
+;; is register-only here ("r", no "I").
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r")))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{sri|srwi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; gt:SI recording form.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER"
+ "@
+ doz %0,%2,%1\;nabs %0,%0\;{sri.|srwi.} %0,%0,31
+ #"
+ [(set_attr "type" "delayed_compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for the gt:SI recording form.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (gt:SI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 0)
+ (gt:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; plus(gt(op1, 0), op2) patterns, SI and DI: addc/subfe leaves (op1 > 0)
+;; in the carry, which addze folds into op2.  Value forms, compare-only
+;; recording forms, and the usual non-cr0 splits.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "! TARGET_POWERPC64"
+ "{a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze|addze} %0,%2"
+ [(set_attr "length" "12")])
+
+;; 64-bit twin of plus(gt 0).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:DI 3 "=&r"))]
+ "TARGET_POWERPC64"
+ "addc %3,%1,%1\;subfe %3,%1,%3\;addze %0,%2"
+ [(set_attr "length" "12")])
+
+;; SI compare-only recording form.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze.|addze.} %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; SI non-cr0 split: compute into the scratch, then compare it.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; DI compare-only recording form.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addc %3,%1,%1\;subfe %3,%1,%3\;addze. %3,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; DI non-cr0 split.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 3)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0))
+ (match_dup 2)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 3)
+ (const_int 0)))]
+ "")
+
+;; plus(gt(op1, 0), op2) variants that keep the sum in op0 while also
+;; recording it in a CC register; SI and DI versions plus their post-reload
+;; non-cr0 splits (parallel value insn + explicit compare).
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {a|addc} %3,%1,%1\;{sfe|subfe} %3,%1,%3\;{aze.|addze.} %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; SI non-cr0 split.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_scratch:SI 3 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; 64-bit recording-with-output form.
+(define_insn ""
+ [(set (match_operand:CC 4 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_scratch:DI 3 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addc %3,%1,%1\;subfe %3,%1,%3\;addze. %0,%2
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; DI non-cr0 split.
+(define_split
+ [(set (match_operand:CC 4 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 0))
+ (match_operand:DI 2 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_scratch:DI 3 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:DI (gt:DI (match_dup 1) (const_int 0)) (match_dup 2)))
+ (clobber (match_dup 3))])
+ (set (match_dup 4)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; plus(gt:SI(op1, op2), op3) patterns for the general (non-zero op2) case,
+;; TARGET_POWER only (doz %4,%2,%1 with swapped operands computes gt).
+;; Value form, compare-only recording form, recording-with-output form, and
+;; the non-cr0 splits.
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r")))
+ (clobber (match_scratch:SI 4 "=&r"))]
+ "TARGET_POWER"
+ "doz %4,%2,%1\;{ai|addic} %4,%4,-1\;{aze|addze} %0,%3"
+ [(set_attr "length" "12")])
+
+;; Compare-only recording form.
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{ai|addic} %4,%4,-1\;{aze.|addze.} %4,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for the compare-only form.
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+;; Recording form keeping the sum in op0.
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "r,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "TARGET_POWER"
+ "@
+ doz %4,%2,%1\;{ai|addic} %4,%4,-1\;{aze.|addze.} %0,%3
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Its non-cr0 split.
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "TARGET_POWER && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (gt:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+;; op0 = -(op1 > 0) as a 0/-1 mask: arithmetic right shift of the sign bit
+;; (srawi 31 for SI, sradi 63 for DI).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (const_int 0))))]
+ "! TARGET_POWERPC64"
+ "{sfi|subfic} %0,%1,0\;{ame|addme} %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; 64-bit twin of -(op1 > 0).
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (neg:DI (gt:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (const_int 0))))]
+ "TARGET_POWERPC64"
+ "subfic %0,%1,0\;addme %0,%0\;sradi %0,%0,63"
+ [(set_attr "length" "12")])
+
+;; op0 = -(op1 > op2) for general op2, TARGET_POWER (doz-based).
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (gt:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "r"))))]
+ "TARGET_POWER"
+ "doz %0,%2,%1\;nabs %0,%0\;{srai|srawi} %0,%0,31"
+ [(set_attr "length" "12")])
+
+;; Unsigned gtu scc patterns: subf%I2c computes op2 - op1 setting CA, then
+;; subfe/neg turns the borrow into 0/1.  %I2 folds a short immediate op2
+;; into the subtract-from-immediate form.  SI (32-bit) version:
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;neg %0,%0"
+ [(set_attr "length" "12")])
+
+;; 64-bit twin of the gtu value pattern.
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI")))]
+ "TARGET_POWERPC64"
+ "subf%I2c %0,%1,%2\;subfe %0,%0,%0\;neg %0,%0"
+ [(set_attr "length" "12")])
+
+;; gtu:SI recording form; "#" splits below for non-cr0 CC targets.
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (gtu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64"
+ "@
+ {sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0\;neg. %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+;; Non-cr0 split for gtu:SI recording form.
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (gtu:SI (match_dup 1) (match_dup 2)))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (gtu:SI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 3 "cc_reg_operand" "=x,?y")
+ (compare:CC
+ (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI,rI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (gtu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64"
+ "@
+ subf%I2c %0,%1,%2\;subfe %0,%0,%0\;neg. %0,%0
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "12,16")])
+
+(define_split
+ [(set (match_operand:CC 3 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (gtu:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 0)
+ (gtu:DI (match_dup 1) (match_dup 2)))
+ (set (match_dup 3)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r,r")
+ (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "I,rI"))
+ (match_operand:SI 3 "reg_or_short_operand" "r,rI")))
+ (clobber (match_scratch:SI 4 "=&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {ai|addic} %4,%1,%k2\;{aze|addze} %0,%3
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;{sf%I3|subf%I3c} %0,%4,%3"
+ [(set_attr "length" "8,12")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r,r")
+ (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "I,rI"))
+ (match_operand:DI 3 "reg_or_short_operand" "r,rI")))
+ (clobber (match_scratch:DI 4 "=&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addic %4,%1,%k2\;addze %0,%3
+ subf%I2c %4,%1,%2\;subfe %4,%4,%4\;subf%I3c %0,%4,%3"
+ [(set_attr "length" "8,12")])
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {ai|addic} %4,%1,%k2\;{aze.|addze.} %4,%3
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %4,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12,12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:SI (gtu:SI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:DI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addic %4,%1,%k2\;addze. %4,%3
+ subf%I2c %4,%1,%2\;subfe %4,%4,%4\;subfc. %4,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12,12,16")])
+
+(define_split
+ [(set (match_operand:CC 0 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (match_operand:DI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(set (match_dup 4)
+ (plus:DI (gtu:DI (match_dup 1) (match_dup 2))
+ (match_dup 3)))
+ (set (match_dup 0)
+ (compare:CC (match_dup 4)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:SI 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:SI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 "=&r,&r,&r,&r"))]
+ "! TARGET_POWERPC64"
+ "@
+ {ai|addic} %4,%1,%k2\;{aze.|addze.} %0,%3
+ {sf%I2|subf%I2c} %4,%1,%2\;{sfe|subfe} %4,%4,%4\;{sf.|subfc.} %0,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12,12,16")])
+
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "")
+ (match_operand:SI 2 "reg_or_short_operand" ""))
+ (match_operand:SI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:SI (gtu:SI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:CC 5 "cc_reg_operand" "=x,x,?y,?y")
+ (compare:CC
+ (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r,r,r,r")
+ (match_operand:DI 2 "reg_or_short_operand" "I,r,I,r"))
+ (match_operand:DI 3 "gpc_reg_operand" "r,r,r,r"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "=r,r,r,r")
+ (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:DI 4 "=&r,&r,&r,&r"))]
+ "TARGET_POWERPC64"
+ "@
+ addic %4,%1,%k2\;addze. %0,%3
+ subf%I2c %4,%1,%2\;subfe %4,%4,%4\;subfc. %0,%4,%3
+ #
+ #"
+ [(set_attr "type" "compare")
+ (set_attr "length" "8,12,12,16")])
+
+(define_split
+ [(set (match_operand:CC 5 "cc_reg_not_cr0_operand" "")
+ (compare:CC
+ (plus:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "")
+ (match_operand:DI 2 "reg_or_short_operand" ""))
+ (match_operand:DI 3 "gpc_reg_operand" ""))
+ (const_int 0)))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 0)
+ (plus:DI (gtu:DI (match_dup 1) (match_dup 2)) (match_dup 3)))
+ (clobber (match_dup 4))])
+ (set (match_dup 5)
+ (compare:CC (match_dup 0)
+ (const_int 0)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (neg:SI (gtu:SI (match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI"))))]
+ "! TARGET_POWERPC64"
+ "{sf%I2|subf%I2c} %0,%1,%2\;{sfe|subfe} %0,%0,%0"
+ [(set_attr "length" "8")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "gpc_reg_operand" "=r")
+ (neg:DI (gtu:DI (match_operand:DI 1 "gpc_reg_operand" "r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI"))))]
+ "TARGET_POWERPC64"
+ "subf%I2c %0,%1,%2\;subfe %0,%0,%0"
+ [(set_attr "length" "8")])
+
+;; Define both directions of branch and return. If we need a reload
+;; register, we'd rather use CR0 since it is much easier to copy a
+;; register CC value to there.
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "x,?y")
+ (const_int 0)])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 0, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "x,?y")
+ (const_int 0)])
+ (return)
+ (pc)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 0, insn);
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "x,?y")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 0 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], \"%l0\", 1, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (match_operator 0 "branch_comparison_operator"
+ [(match_operand 1
+ "cc_reg_operand" "x,?y")
+ (const_int 0)])
+ (pc)
+ (return)))]
+ "direct_return ()"
+ "*
+{
+ return output_cbranch (operands[0], NULL, 1, insn);
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "4")])
+
+;; Logic on condition register values.
+
+; This pattern matches things like
+; (set (reg:CCEQ 68) (compare:CCEQ (ior:SI (gt:SI (reg:CCFP 68) (const_int 0))
+; (eq:SI (reg:CCFP 68) (const_int 0)))
+; (const_int 1)))
+; which are generated by the branch logic.
+
+(define_insn ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_operator"
+ [(match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "y")
+ (const_int 0)])])
+ (const_int 1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical")])
+
+; Why is the constant -1 here, but 1 in the previous pattern?
+; Because ~1 has all but the low bit set.
+(define_insn ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator:SI 1 "boolean_or_operator"
+ [(not:SI (match_operator:SI 2
+ "branch_positive_comparison_operator"
+ [(match_operand 3
+ "cc_reg_operand" "y")
+ (const_int 0)]))
+ (match_operator:SI 4
+ "branch_positive_comparison_operator"
+ [(match_operand 5
+ "cc_reg_operand" "y")
+ (const_int 0)])])
+ (const_int -1)))]
+ ""
+ "cr%q1 %E0,%j2,%j4"
+ [(set_attr "type" "cr_logical")])
+
+(define_insn ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator:SI 1
+ "branch_positive_comparison_operator"
+ [(match_operand 2
+ "cc_reg_operand" "y")
+ (const_int 0)])
+ (const_int 0)))]
+ ""
+ "{crnor %E0,%j1,%j1|crnot %E0,%j1}"
+ [(set_attr "type" "cr_logical")])
+
+;; If we are comparing the result of two comparisons, this can be done
+;; using creqv or crxor.
+
+(define_insn_and_split ""
+ [(set (match_operand:CCEQ 0 "cc_reg_operand" "=y")
+ (compare:CCEQ (match_operator 1 "branch_comparison_operator"
+ [(match_operand 2 "cc_reg_operand" "y")
+ (const_int 0)])
+ (match_operator 3 "branch_comparison_operator"
+ [(match_operand 4 "cc_reg_operand" "y")
+ (const_int 0)])))]
+ ""
+ "#"
+ ""
+ [(set (match_dup 0) (compare:CCEQ (xor:SI (match_dup 1) (match_dup 3))
+ (match_dup 5)))]
+ "
+{
+ int positive_1, positive_2;
+
+ positive_1 = branch_positive_comparison_operator (operands[1], CCEQmode);
+ positive_2 = branch_positive_comparison_operator (operands[3], CCEQmode);
+
+ if (! positive_1)
+ operands[1] = gen_rtx (rs6000_reverse_condition (GET_MODE (operands[2]),
+ GET_CODE (operands[1])),
+ SImode,
+ operands[2], const0_rtx);
+ else if (GET_MODE (operands[1]) != SImode)
+ operands[1] = gen_rtx (GET_CODE (operands[1]),
+ SImode,
+ operands[2], const0_rtx);
+
+ if (! positive_2)
+ operands[3] = gen_rtx (rs6000_reverse_condition (GET_MODE (operands[4]),
+ GET_CODE (operands[3])),
+ SImode,
+ operands[4], const0_rtx);
+ else if (GET_MODE (operands[3]) != SImode)
+ operands[3] = gen_rtx (GET_CODE (operands[3]),
+ SImode,
+ operands[4], const0_rtx);
+
+ if (positive_1 == positive_2)
+ {
+ operands[1] = gen_rtx_NOT (SImode, operands[1]);
+ operands[5] = constm1_rtx;
+ }
+ else
+ {
+ operands[5] = const1_rtx;
+ }
+}")
+
+;; Unconditional branch and return.
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "b %l0"
+ [(set_attr "type" "branch")])
+
+(define_insn "return"
+ [(return)]
+ "direct_return ()"
+ "{br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "register_operand" ""))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_jump_insn (gen_indirect_jumpsi (operands[0]));
+ else
+ emit_jump_insn (gen_indirect_jumpdi (operands[0]));
+ DONE;
+}")
+
+(define_insn "indirect_jumpsi"
+ [(set (pc) (match_operand:SI 0 "register_operand" "c,*l"))]
+ "TARGET_32BIT"
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_insn "indirect_jumpdi"
+ [(set (pc) (match_operand:DI 0 "register_operand" "c,*l"))]
+ "TARGET_64BIT"
+ "@
+ bctr
+ blr"
+ [(set_attr "type" "jmpreg")])
+
+;; Table jump for switch statements:
+(define_expand "tablejump"
+ [(use (match_operand 0 "" ""))
+ (use (label_ref (match_operand 1 "" "")))]
+ ""
+ "
+{
+ if (TARGET_32BIT)
+ emit_jump_insn (gen_tablejumpsi (operands[0], operands[1]));
+ else
+ emit_jump_insn (gen_tablejumpdi (operands[0], operands[1]));
+ DONE;
+}")
+
+(define_expand "tablejumpsi"
+ [(set (match_dup 3)
+ (plus:SI (match_operand:SI 0 "" "")
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_32BIT"
+ "
+{ operands[0] = force_reg (SImode, operands[0]);
+ operands[2] = force_reg (SImode, gen_rtx_LABEL_REF (SImode, operands[1]));
+ operands[3] = gen_reg_rtx (SImode);
+}")
+
+(define_expand "tablejumpdi"
+ [(set (match_dup 4)
+ (sign_extend:DI (match_operand:SI 0 "lwa_operand" "rm")))
+ (set (match_dup 3)
+ (plus:DI (match_dup 4)
+ (match_dup 2)))
+ (parallel [(set (pc) (match_dup 3))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "TARGET_64BIT"
+ "
+{ operands[2] = force_reg (DImode, gen_rtx_LABEL_REF (DImode, operands[1]));
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+}")
+
+(define_insn ""
+ [(set (pc)
+ (match_operand:SI 0 "register_operand" "c,*l"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "TARGET_32BIT"
+ "@
+ bctr
+ {br|blr}"
+ [(set_attr "type" "jmpreg")])
+
+(define_insn ""
+ [(set (pc)
+ (match_operand:DI 0 "register_operand" "c,*l"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "TARGET_64BIT"
+ "@
+ bctr
+ blr"
+ [(set_attr "type" "jmpreg")])
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "{cror 0,0,0|nop}")
+
+;; Define the subtract-one-and-jump insns, starting with the template
+;; so loop.c knows what to generate.
+
+(define_expand "doloop_end"
+ [(use (match_operand 0 "" "")) ; loop pseudo
+ (use (match_operand 1 "" "")) ; iterations; zero if unknown
+ (use (match_operand 2 "" "")) ; max iterations
+ (use (match_operand 3 "" "")) ; loop level
+ (use (match_operand 4 "" ""))] ; label
+ ""
+ "
+{
+ /* Only use this on innermost loops. */
+ if (INTVAL (operands[3]) > 1)
+ FAIL;
+ if (TARGET_POWERPC64)
+ {
+ if (GET_MODE (operands[0]) != DImode)
+ FAIL;
+ emit_jump_insn (gen_ctrdi (operands[0], operands[4]));
+ }
+ else
+ {
+ if (GET_MODE (operands[0]) != SImode)
+ FAIL;
+ emit_jump_insn (gen_ctrsi (operands[0], operands[4]));
+ }
+ DONE;
+}")
+
+(define_expand "ctrsi"
+ [(parallel [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "register_operand" "")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))
+ (clobber (match_scratch:CC 2 ""))
+ (clobber (match_scratch:SI 3 ""))])]
+ "! TARGET_POWERPC64"
+ "")
+
+(define_expand "ctrdi"
+ [(parallel [(set (pc)
+ (if_then_else (ne (match_operand:DI 0 "register_operand" "")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:DI (match_dup 0)
+ (const_int -1)))
+ (clobber (match_scratch:CC 2 ""))
+ (clobber (match_scratch:DI 3 ""))])]
+ "TARGET_POWERPC64"
+ "")
+
+;; We need to be able to do this for any operand, including MEM, or we
+;; will cause reload to blow up since we don't allow output reloads on
+;; JUMP_INSNs.
+;; For the length attribute to be calculated correctly, the
+;; label MUST be operand 0.
+
+(define_insn "*ctrsi_internal1"
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrsi_internal2"
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal1"
+ [(set (pc)
+ (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal2"
+ [(set (pc)
+ (if_then_else (ne (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+;; Similar, but we can use GE since we have a REG_NONNEG.
+
+(define_insn "*ctrsi_internal3"
+ [(set (pc)
+ (if_then_else (ge (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&X"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrsi_internal4"
+ [(set (pc)
+ (if_then_else (ge (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&X"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal3"
+ [(set (pc)
+ (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&X"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal4"
+ [(set (pc)
+ (if_then_else (ge (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 0))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&X"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64 && find_reg_note (insn, REG_NONNEG, 0)"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+;; Similar but use EQ
+
+(define_insn "*ctrsi_internal5"
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrsi_internal6"
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:SI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:SI 4 "=X,X,r"))]
+ "! TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal5"
+ [(set (pc)
+ (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"bdz %l0\";
+ else
+ return \"{bdn|bdnz} $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+(define_insn "*ctrdi_internal6"
+ [(set (pc)
+ (if_then_else (eq (match_operand:DI 1 "register_operand" "c,*r,*r")
+ (const_int 1))
+ (pc)
+ (label_ref (match_operand 0 "" ""))))
+ (set (match_operand:DI 2 "register_operand" "=1,*r,m*q*c*l")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 "=X,&x,&x"))
+ (clobber (match_scratch:DI 4 "=X,X,r"))]
+ "TARGET_POWERPC64"
+ "*
+{
+ if (which_alternative != 0)
+ return \"#\";
+ else if (get_attr_length (insn) == 4)
+ return \"{bdn|bdnz} %l0\";
+ else
+ return \"bdz $+8\;b %l0\";
+}"
+ [(set_attr "type" "branch")
+ (set_attr "length" "*,12,16")])
+
+;; Now the splitters if we could not allocate the CTR register
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:SI 0 "gpc_reg_operand" "")
+ (plus:SI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:SI (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 1)
+ (const_int -1)))])
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
+ const0_rtx); }")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:SI 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:SI 0 "nonimmediate_operand" "")
+ (plus:SI (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:SI 4 ""))]
+ "! TARGET_POWERPC64 && reload_completed
+ && ! gpc_reg_operand (operands[0], SImode)"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:SI (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 4)
+ (plus:SI (match_dup 1)
+ (const_int -1)))])
+ (set (match_dup 0)
+ (match_dup 4))
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
+ const0_rtx); }")
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:DI 0 "gpc_reg_operand" "")
+ (plus:DI (match_dup 1)
+ (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:DI (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 0)
+ (plus:DI (match_dup 1)
+ (const_int -1)))])
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
+ const0_rtx); }")
+
+(define_split
+ [(set (pc)
+ (if_then_else (match_operator 2 "comparison_operator"
+ [(match_operand:DI 1 "gpc_reg_operand" "")
+ (const_int 1)])
+ (match_operand 5 "" "")
+ (match_operand 6 "" "")))
+ (set (match_operand:DI 0 "nonimmediate_operand" "")
+ (plus:DI (match_dup 1) (const_int -1)))
+ (clobber (match_scratch:CC 3 ""))
+ (clobber (match_scratch:DI 4 ""))]
+ "TARGET_POWERPC64 && reload_completed
+ && ! gpc_reg_operand (operands[0], DImode)"
+ [(parallel [(set (match_dup 3)
+ (compare:CC (plus:DI (match_dup 1)
+ (const_int -1))
+ (const_int 0)))
+ (set (match_dup 4)
+ (plus:DI (match_dup 1)
+ (const_int -1)))])
+ (set (match_dup 0)
+ (match_dup 4))
+ (set (pc) (if_then_else (match_dup 7)
+ (match_dup 5)
+ (match_dup 6)))]
+ "
+{ operands[7] = gen_rtx (GET_CODE (operands[2]), VOIDmode, operands[3],
+ const0_rtx); }")
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 0))]
+ ""
+ "{t 31,0,0|trap}")
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand 1 "const_int_operand" ""))]
+ ""
+ "if (rs6000_compare_fp_p || operands[1] != const0_rtx) FAIL;
+ operands[2] = rs6000_compare_op0;
+ operands[3] = rs6000_compare_op1;")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "reg_or_short_operand" "rI")])
+ (const_int 0))]
+ ""
+ "{t|tw}%V0%I2 %1,%2")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "trap_comparison_operator"
+ [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "reg_or_short_operand" "rI")])
+ (const_int 0))]
+ "TARGET_POWERPC64"
+ "td%V0%I2 %1,%2")
+
+;; Insns related to generating the function prologue and epilogue.
+
+(define_expand "prologue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_prologue ();
+ DONE;
+}")
+
+(define_insn "movesi_from_cr"
+ [(set (match_operand:SI 0 "gpc_reg_operand" "=r")
+ (unspec:SI [(reg:CC 68) (reg:CC 69) (reg:CC 70) (reg:CC 71)
+ (reg:CC 72) (reg:CC 73) (reg:CC 74) (reg:CC 75)] 19))]
+ ""
+ "mfcr %0")
+
+(define_insn "*stmw"
+ [(match_parallel 0 "stmw_operation"
+ [(set (match_operand:SI 1 "memory_operand" "=m")
+ (match_operand:SI 2 "gpc_reg_operand" "r"))])]
+ "TARGET_MULTIPLE"
+ "{stm|stmw} %2,%1")
+
+(define_insn "*save_fpregs_si"
+ [(match_parallel 0 "any_operand"
+ [(clobber (match_operand:SI 1 "register_operand" "=l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "memory_operand" "=m")
+ (match_operand:DF 4 "gpc_reg_operand" "f"))])]
+ "TARGET_32BIT"
+ "bl %z2")
+
+(define_insn "*save_fpregs_di"
+ [(match_parallel 0 "any_operand"
+ [(clobber (match_operand:DI 1 "register_operand" "=l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "memory_operand" "=m")
+ (match_operand:DF 4 "gpc_reg_operand" "f"))])]
+ "TARGET_64BIT"
+ "bl %z2")
+
+; These are to explain that changes to the stack pointer should
+; not be moved over stores to stack memory.
+(define_insn "stack_tie"
+ [(set (match_operand:BLK 0 "memory_operand" "+m")
+ (unspec:BLK [(match_dup 0)] 5))]
+ ""
+ ""
+ [(set_attr "length" "0")])
+
+
+(define_expand "epilogue"
+ [(use (const_int 0))]
+ "TARGET_SCHED_PROLOG"
+ "
+{
+ rs6000_emit_epilogue (FALSE);
+ DONE;
+}")
+
+; On some processors, doing the mtcrf one CC register at a time is
+; faster (like on the 604e). On others, doing them all at once is
+; faster; for instance, on the 601 and 750.
+
+(define_expand "movsi_to_cr_one"
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_dup 2)] 20))]
+ ""
+ "operands[2] = GEN_INT (1 << (75 - REGNO (operands[0])));")
+
+(define_insn "*movsi_to_cr"
+ [(match_parallel 0 "mtcrf_operation"
+ [(set (match_operand:CC 1 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 2 "gpc_reg_operand" "r")
+ (match_operand 3 "immediate_operand" "n")]
+ 20))])]
+ ""
+ "*
+{
+ int mask = 0;
+ int i;
+ for (i = 0; i < XVECLEN (operands[0], 0); i++)
+ mask |= INTVAL (XVECEXP (SET_SRC (XVECEXP (operands[0], 0, i)), 0, 1));
+ operands[4] = GEN_INT (mask);
+ return \"mtcrf %4,%2\";
+}")
+
+(define_insn ""
+ [(set (match_operand:CC 0 "cc_reg_operand" "=y")
+ (unspec:CC [(match_operand:SI 1 "gpc_reg_operand" "r")
+ (match_operand 2 "immediate_operand" "n")] 20))]
+ "GET_CODE (operands[0]) == REG
+ && CR_REGNO_P (REGNO (operands[0]))
+ && GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 1 << (75 - REGNO (operands[0]))"
+ "mtcrf %R0,%1")
+
+; The load-multiple instructions have similar properties.
+; Note that "load_multiple" is a name known to the machine-independent
+; code that actually corresponds to the powerpc load-string.
+; NOTE(review): lmw is the load-multiple-word instruction; the
+; "load-string" wording above looks suspect -- verify against upstream
+; rs6000.md before relying on it.
+
+(define_insn "*lmw"
+ [(match_parallel 0 "lmw_operation"
+ [(set (match_operand:SI 1 "gpc_reg_operand" "=r")
+ (match_operand:SI 2 "memory_operand" "m"))])]
+ "TARGET_MULTIPLE"
+ "{lm|lmw} %1,%2")
+
+;; 32-bit indirect return: branch through LR or CTR (constraint "lc");
+;; %T0 selects blr vs. bctr from the register used.
+(define_insn "*return_internal_si"
+ [(return)
+ (use (match_operand:SI 0 "register_operand" "lc"))]
+ "TARGET_32BIT"
+ "b%T0"
+ [(set_attr "type" "jmpreg")])
+
+;; 64-bit twin of *return_internal_si.
+(define_insn "*return_internal_di"
+ [(return)
+ (use (match_operand:DI 0 "register_operand" "lc"))]
+ "TARGET_64BIT"
+ "b%T0"
+ [(set_attr "type" "jmpreg")])
+
+; FIXME: This would probably be somewhat simpler if the Cygnus sibcall
+; stuff was in GCC. Oh, and "any_operand" is a bit flexible...
+
+;; 32-bit combined "restore FP regs and return": tail-branches (b, not bl)
+;; to the out-of-line restore routine named by operand 2, which performs
+;; the FP loads (operands 3/4 describe one of them) and returns via LR
+;; (operand 1).
+(define_insn "*return_and_restore_fpregs_si"
+ [(match_parallel 0 "any_operand"
+ [(return)
+ (use (match_operand:SI 1 "register_operand" "l"))
+ (use (match_operand:SI 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "gpc_reg_operand" "=f")
+ (match_operand:DF 4 "memory_operand" "m"))])]
+ "TARGET_32BIT"
+ "b %z2")
+
+;; 64-bit twin of *return_and_restore_fpregs_si.
+(define_insn "*return_and_restore_fpregs_di"
+ [(match_parallel 0 "any_operand"
+ [(return)
+ (use (match_operand:DI 1 "register_operand" "l"))
+ (use (match_operand:DI 2 "call_operand" "s"))
+ (set (match_operand:DF 3 "gpc_reg_operand" "=f")
+ (match_operand:DF 4 "memory_operand" "m"))])]
+ "TARGET_64BIT"
+ "b %z2")
+
+; This is used in compiling the unwind routines.
+;; Standard "eh_return" expander: operand 0 is the stack adjustment,
+;; operand 1 the handler address.  On AIX the TOC is restored first;
+;; the handler address is installed in the LR save slot via the
+;; eh_set_lr_{si,di} placeholder insns expanded below after reload.
+(define_expand "eh_return"
+ [(use (match_operand 0 "general_operand" ""))
+ (use (match_operand 1 "general_operand" ""))]
+ ""
+ "
+{
+#if TARGET_AIX
+ rs6000_emit_eh_toc_restore (operands[0]);
+#endif
+ if (TARGET_32BIT)
+ emit_insn (gen_eh_set_lr_si (operands[1]));
+ else
+ emit_insn (gen_eh_set_lr_di (operands[1]));
+ emit_move_insn (EH_RETURN_STACKADJ_RTX, operands[0]);
+ DONE;
+}")
+
+; We can't expand this before we know where the link register is stored.
+;; Placeholder (template "#") split after reload; the base-register
+;; scratch ("=&b") is used when the LR save slot needs an indirect frame
+;; pointer load.
+(define_insn "eh_set_lr_si"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "r")] 9)
+ (clobber (match_scratch:SI 1 "=&b"))]
+ "TARGET_32BIT"
+ "#")
+
+;; 64-bit twin of eh_set_lr_si.
+(define_insn "eh_set_lr_di"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r")] 9)
+ (clobber (match_scratch:DI 1 "=&b"))]
+ "TARGET_64BIT"
+ "#")
+
+;; Post-reload split for eh_set_lr_{si,di} (unspec_volatile 9): store the
+;; handler address into the LR save slot when the function saved LR,
+;; otherwise move it straight into LR.  When the frame address cannot be
+;; formed as sp+16-bit-offset, the saved backchain is first loaded into
+;; the scratch (operand 1) and used as the base.
+(define_split
+ [(unspec_volatile [(match_operand 0 "register_operand" "")] 9)
+ (clobber (match_scratch 1 ""))]
+ "reload_completed"
+ [(const_int 0)]
+ "
+{
+ rs6000_stack_t *info = rs6000_stack_info ();
+
+ if (info->lr_save_p)
+ {
+ rtx frame_rtx = stack_pointer_rtx;
+ int sp_offset = 0;
+ rtx tmp;
+
+ /* Large or alloca/frame-pointer frames: reload the backchain.  */
+ if (frame_pointer_needed
+ || current_function_calls_alloca
+ || info->total_size > 32767)
+ {
+ emit_move_insn (operands[1], gen_rtx_MEM (Pmode, frame_rtx));
+ frame_rtx = operands[1];
+ }
+ else if (info->push_p)
+ sp_offset = info->total_size;
+
+ tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
+ tmp = gen_rtx_MEM (Pmode, tmp);
+ emit_move_insn (tmp, operands[0]);
+ }
+ else
+ emit_move_insn (gen_rtx_REG (Pmode, LINK_REGISTER_REGNUM), operands[0]);
+ DONE;
+}")
+
+;; Standard "prefetch" pattern: operand 1 nonzero selects the
+;; prefetch-for-store form (dcbtst), zero the read form (dcbt).
+;; A bare register address uses the "0,reg" encoding; otherwise %a0
+;; prints the indexed address.  Operand 2 (locality) is accepted but
+;; unused by these instructions.
+(define_insn "prefetch"
+ [(prefetch (match_operand:V4SI 0 "address_operand" "p")
+ (match_operand:SI 1 "const_int_operand" "n")
+ (match_operand:SI 2 "const_int_operand" "n"))]
+ "TARGET_POWERPC"
+ "*
+{
+ if (GET_CODE (operands[0]) == REG)
+ return INTVAL (operands[1]) ? \"dcbtst 0,%0\" : \"dcbt 0,%0\";
+ return INTVAL (operands[1]) ? \"dcbtst %a0\" : \"dcbt %a0\";
+}"
+ [(set_attr "type" "load")])
+
+;; AltiVec patterns
+
+;; Generic LVX load instruction.
+;; One pattern per vector mode (V4SI/V8HI/V16QI/V4SF); all emit lvx with
+;; the %y indexed-memory operand form.
+(define_insn "altivec_lvx_4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (match_operand:V4SI 1 "memory_operand" "m"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx_8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (match_operand:V8HI 1 "memory_operand" "m"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx_16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (match_operand:V16QI 1 "memory_operand" "m"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx_4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (match_operand:V4SF 1 "memory_operand" "m"))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%y1"
+ [(set_attr "type" "vecload")])
+
+;; Generic STVX store instruction.
+;; Mirror images of the lvx patterns above, one per vector mode.
+(define_insn "altivec_stvx_4si"
+ [(set (match_operand:V4SI 0 "memory_operand" "=m")
+ (match_operand:V4SI 1 "register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvx_8hi"
+ [(set (match_operand:V8HI 0 "memory_operand" "=m")
+ (match_operand:V8HI 1 "register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvx_16qi"
+ [(set (match_operand:V16QI 0 "memory_operand" "=m")
+ (match_operand:V16QI 1 "register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvx_4sf"
+ [(set (match_operand:V4SF 0 "memory_operand" "=m")
+ (match_operand:V4SF 1 "register_operand" "v"))]
+ "TARGET_ALTIVEC"
+ "stvx %1,%y0"
+ [(set_attr "type" "vecstore")])
+
+;; Vector move instructions.
+;; Each movMM expander defers to rs6000_emit_move (which legitimizes the
+;; operands); the matching *movMM_internal insn covers the three cases
+;; reg->mem (stvx), mem->reg (lvx) and reg->reg (vor with itself).
+(define_expand "movv4si"
+ [(set (match_operand:V4SI 0 "nonimmediate_operand" "")
+ (match_operand:V4SI 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+ "{ rs6000_emit_move (operands[0], operands[1], V4SImode); DONE; }")
+
+(define_insn "*movv4si_internal"
+ [(set (match_operand:V4SI 0 "nonimmediate_operand" "=m,v,v")
+ (match_operand:V4SI 1 "input_operand" "v,m,v"))]
+ "TARGET_ALTIVEC"
+ "@
+ stvx %1,%y0
+ lvx %0,%y1
+ vor %0,%1,%1"
+ [(set_attr "type" "altivec")])
+
+(define_expand "movv8hi"
+ [(set (match_operand:V8HI 0 "nonimmediate_operand" "")
+ (match_operand:V8HI 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+ "{ rs6000_emit_move (operands[0], operands[1], V8HImode); DONE; }")
+
+(define_insn "*movv8hi_internal1"
+ [(set (match_operand:V8HI 0 "nonimmediate_operand" "=m,v,v")
+ (match_operand:V8HI 1 "input_operand" "v,m,v"))]
+ "TARGET_ALTIVEC"
+ "@
+ stvx %1,%y0
+ lvx %0,%y1
+ vor %0,%1,%1"
+ [(set_attr "type" "altivec")])
+
+(define_expand "movv16qi"
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "")
+ (match_operand:V16QI 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+ "{ rs6000_emit_move (operands[0], operands[1], V16QImode); DONE; }")
+
+(define_insn "*movv16qi_internal1"
+ [(set (match_operand:V16QI 0 "nonimmediate_operand" "=m,v,v")
+ (match_operand:V16QI 1 "input_operand" "v,m,v"))]
+ "TARGET_ALTIVEC"
+ "@
+ stvx %1,%y0
+ lvx %0,%y1
+ vor %0,%1,%1"
+ [(set_attr "type" "altivec")])
+
+(define_expand "movv4sf"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "")
+ (match_operand:V4SF 1 "any_operand" ""))]
+ "TARGET_ALTIVEC"
+ "{ rs6000_emit_move (operands[0], operands[1], V4SFmode); DONE; }")
+
+(define_insn "*movv4sf_internal1"
+ [(set (match_operand:V4SF 0 "nonimmediate_operand" "=m,v,v")
+ (match_operand:V4SF 1 "input_operand" "v,m,v"))]
+ "TARGET_ALTIVEC"
+ "@
+ stvx %1,%y0
+ lvx %0,%y1
+ vor %0,%1,%1"
+ [(set_attr "type" "altivec")])
+
+;; Write the VRSAVE special register (hard reg 109 here) from a GPR.
+;; Matched as a PARALLEL via "vrsave_operation"; unspec_volatile keeps it
+;; from being deleted or moved.
+(define_insn "*set_vrsave_internal"
+ [(match_parallel 0 "vrsave_operation"
+ [(set (reg:SI 109)
+ (unspec_volatile:SI [(match_operand:SI 1 "register_operand" "r")
+ (reg:SI 109)] 30))])]
+ "TARGET_ALTIVEC"
+ "mtvrsave %1"
+ [(set_attr "type" "altivec")])
+
+;; Simple binary operations.
+;; The addMM3 patterns use canonical (plus ...) RTL so the middle end can
+;; generate them; the altivec_vadd* variants (carry-out and, presumably,
+;; saturating forms -- per the mnemonics, confirm in the AltiVec PIM) have
+;; no RTL equivalent and are kept behind numbered unspecs for builtins.
+(define_insn "addv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (plus:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddubm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "addv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (plus:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vadduhm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "addv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (plus:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vadduwm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "addv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vaddfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vaddcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 35))]
+ "TARGET_ALTIVEC"
+ "vaddcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddubs"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 36))]
+ "TARGET_ALTIVEC"
+ "vaddubs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddsbs"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 37))]
+ "TARGET_ALTIVEC"
+ "vaddsbs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vadduhs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 38))]
+ "TARGET_ALTIVEC"
+ "vadduhs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 39))]
+ "TARGET_ALTIVEC"
+ "vaddshs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vadduws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 40))]
+ "TARGET_ALTIVEC"
+ "vadduws %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vaddsws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 41))]
+ "TARGET_ALTIVEC"
+ "vaddsws %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; Vector logical AND and AND-with-complement, expressed with canonical
+;; RTL (and / and-not) so combine can form them.
+(define_insn "andv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (and:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vand %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vandc"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (and:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (not:V4SI (match_operand:V4SI 2 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vandc %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; Vector average builtins (unspecs 44-49): one pattern per element
+;; width/signedness, u = unsigned, s = signed; b/h/w = byte/half/word.
+(define_insn "altivec_vavgub"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 44))]
+ "TARGET_ALTIVEC"
+ "vavgub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavgsb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 45))]
+ "TARGET_ALTIVEC"
+ "vavgsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavguh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 46))]
+ "TARGET_ALTIVEC"
+ "vavguh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavgsh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 47))]
+ "TARGET_ALTIVEC"
+ "vavgsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavguw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 48))]
+ "TARGET_ALTIVEC"
+ "vavguw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vavgsw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 49))]
+ "TARGET_ALTIVEC"
+ "vavgsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; Vector compare builtins (unspecs 50-62).  All produce a V4SI/element
+;; mask result; the float compares (vcmpbfp/vcmpeqfp/vcmpgefp/vcmpgtfp)
+;; take V4SF inputs and are typed "veccmp", the integer ones "vecsimple".
+(define_insn "altivec_vcmpbfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 50))]
+ "TARGET_ALTIVEC"
+ "vcmpbfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpequb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 51))]
+ "TARGET_ALTIVEC"
+ "vcmpequb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 52))]
+ "TARGET_ALTIVEC"
+ "vcmpequh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpequw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 53))]
+ "TARGET_ALTIVEC"
+ "vcmpequw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpeqfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 54))]
+ "TARGET_ALTIVEC"
+ "vcmpeqfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgefp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 55))]
+ "TARGET_ALTIVEC"
+ "vcmpgefp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtub"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 56))]
+ "TARGET_ALTIVEC"
+ "vcmpgtub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 57))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 58))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 59))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 60))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtsw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 61))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vcmpgtfp"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 62))]
+ "TARGET_ALTIVEC"
+ "vcmpgtfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+;; Fused multiply add
+(define_insn "altivec_vmaddfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaddfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+;; The unspec here is a vec splat of 0. We do multiply as a fused
+;; multiply-add with an add of a 0 vector.
+;; NOTE(review): this means V4SF multiply is only available with
+;; TARGET_FUSED_MADD, and rounds as an FMA with zero addend.
+(define_expand "mulv4sf3"
+ [(set (match_dup 3) (unspec:V4SF [(const_int 0)] 142))
+ (set (match_operand:V4SF 0 "register_operand" "=v")
+ (plus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_dup 3)))]
+ "TARGET_ALTIVEC && TARGET_FUSED_MADD"
+ "
+{ operands[3] = gen_reg_rtx (V4SFmode); }")
+
+;; Fused negated multiply-subtract.  The vnmsubfp hardware instruction
+;; computes -((op1 * op2) - op3), not (op1 * op2) - op3; the RTL must
+;; carry the outer negation, otherwise the optimizers treat this insn as
+;; a plain multiply-subtract and can generate wrong code from it.
+(define_insn "altivec_vnmsubfp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (neg:V4SF (minus:V4SF (mult:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v"))
+ (match_operand:V4SF 3 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnmsubfp %0,%1,%2,%3"
+ [(set_attr "type" "vecfloat")])
+
+
+;; Multiply-sum builtins (unspecs 65-70): three-operand ternary forms,
+;; kept as unspecs for the __builtin_altivec_vmsum* intrinsics.
+(define_insn "altivec_vmsumubm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 65))]
+ "TARGET_ALTIVEC"
+ "vmsumubm %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+;; Mixed-sign multiply-sum (unspec 66).  The output template must emit
+;; vmsummbm: the original emitted vmsumubm (copy-paste from the pattern
+;; above), silently generating the unsigned variant for the
+;; __builtin_altivec_vmsummbm intrinsic.
+(define_insn "altivec_vmsummbm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 66))]
+ "TARGET_ALTIVEC"
+ "vmsummbm %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+;; Halfword multiply-sum variants: modulo (uhm/shm) and saturating
+;; (uhs/shs) per the mnemonics -- semantics per the AltiVec PIM.
+(define_insn "altivec_vmsumuhm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 67))]
+ "TARGET_ALTIVEC"
+ "vmsumuhm %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshm"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 68))]
+ "TARGET_ALTIVEC"
+ "vmsumshm %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumuhs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 69))]
+ "TARGET_ALTIVEC"
+ "vmsumuhs %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmsumshs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 70))]
+ "TARGET_ALTIVEC"
+ "vmsumshs %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+;; Vector max patterns, using canonical umax/smax RTL so the middle end
+;; can generate them directly.
+(define_insn "umaxv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (umax:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (smax:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "umaxv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (umax:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxuh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (smax:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "umaxv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (umax:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (smax:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "smaxv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smax:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vmaxfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+;; Ternary halfword multiply-add builtins (unspecs 71-73).
+(define_insn "altivec_vmhaddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")] 71))]
+ "TARGET_ALTIVEC"
+ "vmhaddshs %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+(define_insn "altivec_vmhraddshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")] 72))]
+ "TARGET_ALTIVEC"
+ "vmhraddshs %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+(define_insn "altivec_vmladduhm"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")] 73))]
+ "TARGET_ALTIVEC"
+ "vmladduhm %0, %1, %2, %3"
+ [(set_attr "type" "veccomplex")])
+
+;; Vector merge high/low, expressed as canonical vec_merge of a
+;; half-rotated vec_select so the RTL optimizers can reason about them.
+;; The vec_merge mask constant selects the high (vmrgh*) or low (vmrgl*)
+;; half of each input; note the vmrgl* patterns swap which operand feeds
+;; the vec_select.
+(define_insn "altivec_vmrghb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 9)
+ (const_int 10)
+ (const_int 11)
+ (const_int 12)
+ (const_int 13)
+ (const_int 14)
+ (const_int 15)
+ (const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))
+ (match_operand:V16QI 2 "register_operand" "v")
+ (const_int 255)))]
+ "TARGET_ALTIVEC"
+ "vmrghb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)
+ (const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))
+ (match_operand:V8HI 2 "register_operand" "v")
+ (const_int 15)))]
+ "TARGET_ALTIVEC"
+ "vmrghh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrghw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 3)
+ (const_int 0)
+ (const_int 1)]))
+ (match_operand:V4SI 2 "register_operand" "v")
+ (const_int 12)))]
+ "TARGET_ALTIVEC"
+ "vmrghw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (vec_merge:V16QI (vec_select:V16QI (match_operand:V16QI 2 "register_operand" "v")
+ (parallel [(const_int 8)
+ (const_int 9)
+ (const_int 10)
+ (const_int 11)
+ (const_int 12)
+ (const_int 13)
+ (const_int 14)
+ (const_int 15)
+ (const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)
+ (const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)]))
+ (match_operand:V16QI 1 "register_operand" "v")
+ (const_int 255)))]
+ "TARGET_ALTIVEC"
+ "vmrglb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (vec_merge:V8HI (vec_select:V8HI (match_operand:V8HI 2 "register_operand" "v")
+ (parallel [(const_int 4)
+ (const_int 5)
+ (const_int 6)
+ (const_int 7)
+ (const_int 0)
+ (const_int 1)
+ (const_int 2)
+ (const_int 3)]))
+ (match_operand:V8HI 1 "register_operand" "v")
+ (const_int 15)))]
+ "TARGET_ALTIVEC"
+ "vmrglh %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vmrglw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (vec_merge:V4SI (vec_select:V4SI (match_operand:V4SI 2 "register_operand" "v")
+ (parallel [(const_int 2)
+ (const_int 3)
+ (const_int 0)
+ (const_int 1)]))
+ (match_operand:V4SI 1 "register_operand" "v")
+ (const_int 12)))]
+ "TARGET_ALTIVEC"
+ "vmrglw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+;; Vector min patterns, mirror images of the max group above.
+(define_insn "uminv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (umin:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminub %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (smin:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminsb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "uminv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (umin:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminuh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (smin:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminsh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "uminv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (umin:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (smin:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminsw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "sminv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (smin:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vminfp %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+;; Widening multiply builtins (unspecs 83-90): even (vmule*) and odd
+;; (vmulo*) element forms; result mode is twice the input element width.
+(define_insn "altivec_vmuleub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 83))]
+ "TARGET_ALTIVEC"
+ "vmuleub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 84))]
+ "TARGET_ALTIVEC"
+ "vmulesb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuleuh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 85))]
+ "TARGET_ALTIVEC"
+ "vmuleuh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulesh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 86))]
+ "TARGET_ALTIVEC"
+ "vmulesh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmuloub"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 87))]
+ "TARGET_ALTIVEC"
+ "vmuloub %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 88))]
+ "TARGET_ALTIVEC"
+ "vmulosb %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulouh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 89))]
+ "TARGET_ALTIVEC"
+ "vmulouh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vmulosh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 90))]
+ "TARGET_ALTIVEC"
+ "vmulosh %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+;; Vector NOR and OR, with canonical (not (ior ...)) / (ior ...) RTL.
+(define_insn "altivec_vnor"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (not:V4SI (ior:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v"))))]
+ "TARGET_ALTIVEC"
+ "vnor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "iorv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (ior:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+;; Vector pack builtins (unspecs 93-103): narrow two wide vectors into
+;; one half-width result.  Variants per the mnemonics: um = unsigned
+;; modulo, ss/us = saturating, px = pixel pack.
+(define_insn "altivec_vpkuhum"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 93))]
+ "TARGET_ALTIVEC"
+ "vpkuhum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwum"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 94))]
+ "TARGET_ALTIVEC"
+ "vpkuwum %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkpx"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 95))]
+ "TARGET_ALTIVEC"
+ "vpkpx %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuhss"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 96))]
+ "TARGET_ALTIVEC"
+ "vpkuhss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshss"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 97))]
+ "TARGET_ALTIVEC"
+ "vpkshss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwss"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 98))]
+ "TARGET_ALTIVEC"
+ "vpkuwss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswss"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 99))]
+ "TARGET_ALTIVEC"
+ "vpkswss %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuhus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 100))]
+ "TARGET_ALTIVEC"
+ "vpkuhus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkshus"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 101))]
+ "TARGET_ALTIVEC"
+ "vpkshus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkuwus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 102))]
+ "TARGET_ALTIVEC"
+ "vpkuwus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vpkswus"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 103))]
+ "TARGET_ALTIVEC"
+ "vpkswus %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrlb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 104))]
+ "TARGET_ALTIVEC"
+ "vrlb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vrlh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 105))]
+ "TARGET_ALTIVEC"
+ "vrlh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vrlw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 106))]
+ "TARGET_ALTIVEC"
+ "vrlw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vslb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 107))]
+ "TARGET_ALTIVEC"
+ "vslb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vslh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 108))]
+ "TARGET_ALTIVEC"
+ "vslh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vslw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 109))]
+ "TARGET_ALTIVEC"
+ "vslw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsl"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 110))]
+ "TARGET_ALTIVEC"
+ "vsl %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vslo"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 111))]
+ "TARGET_ALTIVEC"
+ "vslo %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsrb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 112))]
+ "TARGET_ALTIVEC"
+ "vsrb %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsrh"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 113))]
+ "TARGET_ALTIVEC"
+ "vsrh %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsrw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 114))]
+ "TARGET_ALTIVEC"
+ "vsrw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsrab"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 115))]
+ "TARGET_ALTIVEC"
+ "vsrab %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsrah"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 116))]
+ "TARGET_ALTIVEC"
+ "vsrah %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsraw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 117))]
+ "TARGET_ALTIVEC"
+ "vsraw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsr"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 118))]
+ "TARGET_ALTIVEC"
+ "vsr %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsro"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 119))]
+ "TARGET_ALTIVEC"
+ "vsro %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "subv16qi3"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (minus:V16QI (match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsububm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "subv8hi3"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (minus:V8HI (match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubuhm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "subv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (minus:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubuwm %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "subv4sf3"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (minus:V4SF (match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vsubfp %0,%1,%2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vsubcuw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 124))]
+ "TARGET_ALTIVEC"
+ "vsubcuw %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsububs"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 125))]
+ "TARGET_ALTIVEC"
+ "vsububs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubsbs"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 126))]
+ "TARGET_ALTIVEC"
+ "vsubsbs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubuhs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 127))]
+ "TARGET_ALTIVEC"
+ "vsubuhs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubshs"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 128))]
+ "TARGET_ALTIVEC"
+ "vsubshs %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubuws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 129))]
+ "TARGET_ALTIVEC"
+ "vsubuws %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsubsws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 130))]
+ "TARGET_ALTIVEC"
+ "vsubsws %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vsum4ubs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 131))]
+ "TARGET_ALTIVEC"
+ "vsum4ubs %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum4sbs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 132))]
+ "TARGET_ALTIVEC"
+ "vsum4sbs %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum4shs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 133))]
+ "TARGET_ALTIVEC"
+ "vsum4shs %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsum2sws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 134))]
+ "TARGET_ALTIVEC"
+ "vsum2sws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "altivec_vsumsws"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 135))]
+ "TARGET_ALTIVEC"
+ "vsumsws %0,%1,%2"
+ [(set_attr "type" "veccomplex")])
+
+(define_insn "xorv4si3"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (xor:V4SI (match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vxor %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vspltb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 136))]
+ "TARGET_ALTIVEC"
+ "vspltb %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+(define_insn "altivec_vsplth"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 137))]
+ "TARGET_ALTIVEC"
+ "vsplth %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 138))]
+ "TARGET_ALTIVEC"
+ "vspltw %0,%1,%2"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vspltisb"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:QI 1 "immediate_operand" "i")] 139))]
+ "TARGET_ALTIVEC"
+ "vspltisb %0, %1"
+ [(set_attr "type" "vecsimple")])
+
+
+(define_insn "altivec_vspltish"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:QI 1 "immediate_operand" "i")] 140))]
+ "TARGET_ALTIVEC"
+ "vspltish %0, %1"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_vspltisw"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:QI 1 "immediate_operand" "i")] 141))]
+ "TARGET_ALTIVEC"
+ "vspltisw %0, %1"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn ""
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:QI 1 "immediate_operand" "i")] 142))]
+ "TARGET_ALTIVEC"
+ "vspltisw %0, %1"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "ftruncv4sf2"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (fix:V4SF (match_operand:V4SF 1 "register_operand" "v")))]
+ "TARGET_ALTIVEC"
+ "vrfiz %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vperm_4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")] 144))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vperm_4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")] 145))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vperm_8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")] 146))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vperm_16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")] 147))]
+ "TARGET_ALTIVEC"
+ "vperm %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vrfip"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 148))]
+ "TARGET_ALTIVEC"
+ "vrfip %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfin"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 149))]
+ "TARGET_ALTIVEC"
+ "vrfin %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrfim"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 150))]
+ "TARGET_ALTIVEC"
+ "vrfim %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vcfux"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 151))]
+ "TARGET_ALTIVEC"
+ "vcfux %0, %1, %2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vcfsx"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 152))]
+ "TARGET_ALTIVEC"
+ "vcfsx %0, %1, %2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctuxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 153))]
+ "TARGET_ALTIVEC"
+ "vctuxs %0, %1, %2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vctsxs"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:QI 2 "immediate_operand" "i")] 154))]
+ "TARGET_ALTIVEC"
+ "vctsxs %0, %1, %2"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vlogefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 155))]
+ "TARGET_ALTIVEC"
+ "vlogefp %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vexptefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 156))]
+ "TARGET_ALTIVEC"
+ "vexptefp %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrsqrtefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 157))]
+ "TARGET_ALTIVEC"
+ "vrsqrtefp %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vrefp"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")] 158))]
+ "TARGET_ALTIVEC"
+ "vrefp %0, %1"
+ [(set_attr "type" "vecfloat")])
+
+(define_insn "altivec_vsel_4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 159))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:V4SI 3 "register_operand" "v")] 160))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:V8HI 3 "register_operand" "v")] 161))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsel_16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:V16QI 3 "register_operand" "v")] 162))]
+ "TARGET_ALTIVEC"
+ "vsel %0,%1,%2,%3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_4si"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")] 163))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0, %1, %2, %3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_4sf"
+ [(set (match_operand:V4SF 0 "register_operand" "=v")
+ (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")] 164))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0, %1, %2, %3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_8hi"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")] 165))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0, %1, %2, %3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vsldoi_16qi"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")
+ (match_operand:QI 3 "immediate_operand" "i")] 166))]
+ "TARGET_ALTIVEC"
+ "vsldoi %0, %1, %2, %3"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] 167))]
+ "TARGET_ALTIVEC"
+ "vupkhsb %0, %1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 168))]
+ "TARGET_ALTIVEC"
+ "vupkhpx %0, %1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupkhsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 169))]
+ "TARGET_ALTIVEC"
+ "vupkhsh %0, %1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsb"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v")] 170))]
+ "TARGET_ALTIVEC"
+ "vupklsb %0, %1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklpx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 171))]
+ "TARGET_ALTIVEC"
+ "vupklpx %0, %1"
+ [(set_attr "type" "vecperm")])
+
+(define_insn "altivec_vupklsh"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")] 172))]
+ "TARGET_ALTIVEC"
+ "vupklsh %0, %1"
+ [(set_attr "type" "vecperm")])
+
+;; AltiVec predicates.
+
+(define_insn "altivec_vcmpequb_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 173))]
+ "TARGET_ALTIVEC"
+ "vcmpequb. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpequh_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 174))]
+ "TARGET_ALTIVEC"
+ "vcmpequh. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpequw_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 175))]
+ "TARGET_ALTIVEC"
+ "vcmpequw. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpeqfp_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 176))]
+ "TARGET_ALTIVEC"
+ "vcmpeqfp. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtub_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 177))]
+ "TARGET_ALTIVEC"
+ "vcmpgtub. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtsb_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V16QI 1 "register_operand" "v")
+ (match_operand:V16QI 2 "register_operand" "v")] 178))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsb. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtuw_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 179))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuw. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtsw_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "v")
+ (match_operand:V4SI 2 "register_operand" "v")] 180))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsw. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgefp_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 181))]
+ "TARGET_ALTIVEC"
+ "vcmpgefp. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtfp_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 182))]
+ "TARGET_ALTIVEC"
+ "vcmpgtfp. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpbfp_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V4SF 1 "register_operand" "v")
+ (match_operand:V4SF 2 "register_operand" "v")] 183))]
+ "TARGET_ALTIVEC"
+ "vcmpbfp. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtuh_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 184))]
+ "TARGET_ALTIVEC"
+ "vcmpgtuh. %0,%1,%2"
+[(set_attr "type" "veccmp")])
+
+(define_insn "altivec_vcmpgtsh_p"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v")
+ (match_operand:V8HI 2 "register_operand" "v")] 185))]
+ "TARGET_ALTIVEC"
+ "vcmpgtsh. %0,%1,%2"
+ [(set_attr "type" "veccmp")])
+
+(define_insn "altivec_mtvscr"
+ [(unspec [(match_operand:V4SI 0 "register_operand" "v")] 186)]
+ "TARGET_ALTIVEC"
+ "mtvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_mfvscr"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(const_int 0)] 187))]
+ "TARGET_ALTIVEC"
+ "mfvscr %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dssall"
+ [(unspec [(const_int 0)] 188)]
+ "TARGET_ALTIVEC"
+ "dssall"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dss"
+ [(unspec [(match_operand:QI 0 "immediate_operand" "i")] 189)]
+ "TARGET_ALTIVEC"
+ "dss %0"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dst"
+ [(unspec [(match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 190)]
+ "TARGET_ALTIVEC"
+ "dst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstt"
+ [(unspec [(match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 191)]
+ "TARGET_ALTIVEC"
+ "dstt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dstst"
+ [(unspec [(match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 192)]
+ "TARGET_ALTIVEC"
+ "dstst %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_dststt"
+ [(unspec [(match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")
+ (match_operand:QI 2 "immediate_operand" "i")] 193)]
+ "TARGET_ALTIVEC"
+ "dststt %0,%1,%2"
+ [(set_attr "type" "vecsimple")])
+
+(define_insn "altivec_lvsl"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 194))]
+ "TARGET_ALTIVEC"
+ "lvsl %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvsr"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 195))]
+ "TARGET_ALTIVEC"
+ "lvsr %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvebx"
+ [(set (match_operand:V16QI 0 "register_operand" "=v")
+ (unspec:V16QI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 196))]
+ "TARGET_ALTIVEC"
+ "lvebx %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvehx"
+ [(set (match_operand:V8HI 0 "register_operand" "=v")
+ (unspec:V8HI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 197))]
+ "TARGET_ALTIVEC"
+ "lvehx %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvewx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 198))]
+ "TARGET_ALTIVEC"
+ "lvewx %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvxl"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 199))]
+ "TARGET_ALTIVEC"
+ "lvxl %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+(define_insn "altivec_lvx"
+ [(set (match_operand:V4SI 0 "register_operand" "=v")
+ (unspec:V4SI [(match_operand:SI 1 "register_operand" "b")
+ (match_operand:SI 2 "register_operand" "r")] 200))]
+ "TARGET_ALTIVEC"
+ "lvx %0,%1,%2"
+ [(set_attr "type" "vecload")])
+
+;; Parallel the STV*'s with unspecs because some of them have
+;; identical rtl but are different instructions-- and gcc gets confused.
+
+(define_insn "altivec_stvx"
+ [(parallel
+ [(set (mem:V4SI
+ (and:SI (plus:SI (match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int -16)))
+ (match_operand:V4SI 2 "register_operand" "v"))
+ (unspec [(const_int 0)] 201)])]
+ "TARGET_ALTIVEC"
+ "stvx %2,%0,%1"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvxl"
+ [(parallel
+ [(set (mem:V4SI
+ (and:SI (plus:SI (match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int -16)))
+ (match_operand:V4SI 2 "register_operand" "v"))
+ (unspec [(const_int 0)] 202)])]
+ "TARGET_ALTIVEC"
+ "stvxl %2,%0,%1"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvebx"
+ [(parallel
+ [(set (mem:V16QI
+ (plus:SI (match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r")))
+ (match_operand:V16QI 2 "register_operand" "v"))
+ (unspec [(const_int 0)] 203)])]
+ "TARGET_ALTIVEC"
+ "stvebx %2,%0,%1"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvehx"
+ [(parallel
+ [(set (mem:V8HI
+ (and:SI (plus:SI (match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int -2)))
+ (match_operand:V8HI 2 "register_operand" "v"))
+ (unspec [(const_int 0)] 204)])]
+ "TARGET_ALTIVEC"
+ "stvehx %2,%0,%1"
+ [(set_attr "type" "vecstore")])
+
+(define_insn "altivec_stvewx"
+ [(parallel
+ [(set (mem:V4SI
+ (and:SI (plus:SI (match_operand:SI 0 "register_operand" "b")
+ (match_operand:SI 1 "register_operand" "r"))
+ (const_int -4)))
+ (match_operand:V4SI 2 "register_operand" "v"))
+ (unspec [(const_int 0)] 205)])]
+ "TARGET_ALTIVEC"
+ "stvewx %2,%0,%1"
+ [(set_attr "type" "vecstore")])
diff --git a/contrib/gcc/config/rs6000/rtems.h b/contrib/gcc/config/rs6000/rtems.h
new file mode 100644
index 0000000..6810d38
--- /dev/null
+++ b/contrib/gcc/config/rs6000/rtems.h
@@ -0,0 +1,34 @@
+/* Definitions for rtems targeting a PowerPC using elf.
+ Copyright (C) 1996, 1997, 2000, 2001 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Specify predefined symbols in preprocessor. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-DPPC -Drtems -D__rtems__ \
+ -Asystem=rtems -Acpu=powerpc -Amachine=powerpc"
+
+/* Generate calls to memcpy, memcmp and memset. */
+#ifndef TARGET_MEM_FUNCTIONS
+#define TARGET_MEM_FUNCTIONS
+#endif
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "crt0.o%s"
diff --git a/contrib/gcc/config/rs6000/softfloat.h b/contrib/gcc/config/rs6000/softfloat.h
new file mode 100644
index 0000000..c0dc10d
--- /dev/null
+++ b/contrib/gcc/config/rs6000/softfloat.h
@@ -0,0 +1,24 @@
+/* Target definitions for GNU compiler for PowerPC defaulting to -msoft-float
+ Copyright (C) 2001
+ Free Software Foundation, Inc.
+ Contributed by Red Hat Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef CPP_FLOAT_DEFAULT_SPEC
+#define CPP_FLOAT_DEFAULT_SPEC "-D_SOFT_FLOAT"
diff --git a/contrib/gcc/config/rs6000/sol-ci.asm b/contrib/gcc/config/rs6000/sol-ci.asm
new file mode 100644
index 0000000..b168960
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sol-ci.asm
@@ -0,0 +1,104 @@
+# crti.s for sysv4
+
+# Copyright (C) 1996 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just supplies labeled starting points for the .got* and other
+# special sections. It is linked in first before other modules.
+
+ .file "scrti.s"
+ .ident "GNU C scrti.s"
+
+# Start of .text
+ .section ".text"
+ .globl _ex_text0
+_ex_text0:
+
+# Exception range
+ .section ".exception_ranges","aw"
+ .globl _ex_range0
+_ex_range0:
+
+# List of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_LIST__
+ .type __CTOR_LIST__,@object
+__CTOR_LIST__:
+
+# List of C++ destructors
+ .section ".dtors","aw"
+ .globl __DTOR_LIST__
+ .type __DTOR_LIST__,@object
+__DTOR_LIST__:
+
+# Head of _init function used for static constructors
+ .section ".init","ax"
+ .align 2
+ .globl _init
+ .type _init,@function
+_init: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# lwz %r3,_ex_shared0@got(%r31)
+# lwz %r4,-8(%r3) # _ex_register or 0
+# cmpi %cr0,%r4,0
+# beq .Lno_reg
+# mtlr %r4
+# blrl
+#.Lno_reg:
+
+# Head of _fini function used for static destructors
+ .section ".fini","ax"
+ .align 2
+ .globl _fini
+ .type _fini,@function
+_fini: stwu %r1,-16(%r1)
+ mflr %r0
+ stw %r31,12(%r1)
+ stw %r0,16(%r1)
+
+ bl _GLOBAL_OFFSET_TABLE_-4 # get the GOT address
+ mflr %r31
+
+# _environ and its evil twin environ, pointing to the environment
+ .section ".sdata","aw"
+ .align 2
+ .globl _environ
+ .space 4
+ .weak environ
+ .set environ,_environ
diff --git a/contrib/gcc/config/rs6000/sol-cn.asm b/contrib/gcc/config/rs6000/sol-cn.asm
new file mode 100644
index 0000000..b1da7d7
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sol-cn.asm
@@ -0,0 +1,82 @@
+# crtn.s for sysv4
+
+# Copyright (C) 1996 Free Software Foundation, Inc.
+# Written By Michael Meissner
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2, or (at your option) any
+# later version.
+#
+# In addition to the permissions in the GNU General Public License, the
+# Free Software Foundation gives you unlimited permission to link the
+# compiled version of this file with other programs, and to distribute
+# those programs without any restriction coming from the use of this
+# file. (The General Public License restrictions do apply in other
+# respects; for example, they cover modification of the file, and
+# distribution when not linked into another program.)
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; see the file COPYING. If not, write to
+# the Free Software Foundation, 59 Temple Place - Suite 330,
+# Boston, MA 02111-1307, USA.
+#
+# As a special exception, if you link this library with files
+# compiled with GCC to produce an executable, this does not cause
+# the resulting executable to be covered by the GNU General Public License.
+# This exception does not however invalidate any other reasons why
+# the executable file might be covered by the GNU General Public License.
+#
+
+# This file just supplies labeled ending points for the .got* and other
+# special sections. It is linked in last after other modules.
+
+ .file "scrtn.s"
+ .ident "GNU C scrtn.s"
+
+# Default versions of exception handling register/deregister
+ .weak _ex_register
+ .weak _ex_deregister
+ .set _ex_register,0
+ .set _ex_deregister,0
+
+# End list of C++ constructors
+ .section ".ctors","aw"
+ .globl __CTOR_END__
+ .type __CTOR_END__,@object
+__CTOR_END__:
+
+# End list of C++ destructors
+ .section ".dtors","aw"
+ .globl __DTOR_END__
+ .type __DTOR_END__,@object
+__DTOR_END__:
+
+ .section ".text"
+ .globl _ex_text1
+_ex_text1:
+
+ .section ".exception_ranges","aw"
+ .globl _ex_range1
+_ex_range1:
+
+# Tail of _init used for static constructors
+ .section ".init","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
+
+# Tail of _fini used for static destructors
+ .section ".fini","ax"
+ lwz %r0,16(%r1)
+ lwz %r31,12(%r1)
+ mtlr %r0
+ addi %r1,%r1,16
+ blr
diff --git a/contrib/gcc/config/rs6000/sysv4.h b/contrib/gcc/config/rs6000/sysv4.h
new file mode 100644
index 0000000..c20eaa0
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sysv4.h
@@ -0,0 +1,1458 @@
+/* Target definitions for GNU compiler for PowerPC running System V.4
+ Copyright (C) 1995, 1996, 1997, 1998, 1999, 2000, 2001
+ Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Yes! We are ELF. */
+#define TARGET_OBJECT_FORMAT OBJECT_ELF
+
+/* Default ABI to compile code for. */
+#define DEFAULT_ABI rs6000_current_abi
+
+/* Default ABI to use. */
+#define RS6000_ABI_NAME "sysv"
+
+/* Override rs6000.h definition. */
+#undef ASM_DEFAULT_SPEC
+#define ASM_DEFAULT_SPEC "-mppc"
+
+/* Override rs6000.h definition. */
+#undef CPP_DEFAULT_SPEC
+#define CPP_DEFAULT_SPEC "-D_ARCH_PPC"
+
+/* Small data support types. */
+enum rs6000_sdata_type {
+ SDATA_NONE, /* No small data support. */
+ SDATA_DATA, /* Just put data in .sbss/.sdata, don't use relocs. */
+ SDATA_SYSV, /* Use r13 to point to .sdata/.sbss. */
+ SDATA_EABI /* Use r13 like above, r2 points to .sdata2/.sbss2. */
+};
+
+extern enum rs6000_sdata_type rs6000_sdata;
+
+/* V.4/eabi switches. */
+#define MASK_NO_BITFIELD_TYPE 0x40000000 /* Set PCC_BITFIELD_TYPE_MATTERS to 0. */
+#define MASK_STRICT_ALIGN 0x20000000 /* Set STRICT_ALIGNMENT to 1. */
+#define MASK_RELOCATABLE 0x10000000 /* GOT pointers are PC relative. */
+#define MASK_EABI 0x08000000 /* Adhere to eabi, not System V spec. */
+#define MASK_LITTLE_ENDIAN 0x04000000 /* Target is little endian. */
+#define MASK_REGNAMES 0x02000000 /* Use alternate register names. */
+#define MASK_PROTOTYPE 0x01000000 /* Only prototyped fcns pass variable args. */
+#define MASK_NO_BITFIELD_WORD 0x00800000 /* Bitfields cannot cross word boundaries */
+
+#define TARGET_NO_BITFIELD_TYPE (target_flags & MASK_NO_BITFIELD_TYPE)
+#define TARGET_STRICT_ALIGN (target_flags & MASK_STRICT_ALIGN)
+#define TARGET_RELOCATABLE (target_flags & MASK_RELOCATABLE)
+#define TARGET_EABI (target_flags & MASK_EABI)
+#define TARGET_LITTLE_ENDIAN (target_flags & MASK_LITTLE_ENDIAN)
+#define TARGET_REGNAMES (target_flags & MASK_REGNAMES)
+#define TARGET_PROTOTYPE (target_flags & MASK_PROTOTYPE)
+#define TARGET_NO_BITFIELD_WORD (target_flags & MASK_NO_BITFIELD_WORD)
+#define TARGET_TOC ((target_flags & MASK_64BIT) \
+ || ((target_flags & (MASK_RELOCATABLE \
+ | MASK_MINIMAL_TOC)) \
+ && flag_pic > 1) \
+ || DEFAULT_ABI == ABI_AIX)
+
+#define TARGET_BITFIELD_TYPE (! TARGET_NO_BITFIELD_TYPE)
+#define TARGET_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+#define TARGET_NO_PROTOTYPE (! TARGET_PROTOTYPE)
+#define TARGET_NO_TOC (! TARGET_TOC)
+#define TARGET_NO_EABI (! TARGET_EABI)
+
+/* Strings provided by SUBTARGET_OPTIONS */
+extern const char *rs6000_abi_name;
+extern const char *rs6000_sdata_name;
+
+/* Override rs6000.h definition. */
+#undef SUBTARGET_OPTIONS
+#define SUBTARGET_OPTIONS \
+ { "call-", &rs6000_abi_name, N_("Select ABI calling convention") }, \
+ { "sdata=", &rs6000_sdata_name, N_("Select method for sdata handling") }
+
+/* Max # of bytes for variables to automatically be put into the .sdata
+ or .sdata2 sections. */
+extern int g_switch_value; /* Value of the -G xx switch. */
+extern int g_switch_set; /* Whether -G xx was passed. */
+
+#define SDATA_DEFAULT_SIZE 8
+
+/* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be just
+ the same as -mminimal-toc. */
+/* Override rs6000.h definition. */
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ { "bit-align", -MASK_NO_BITFIELD_TYPE, \
+ N_("Align to the base type of the bit-field") }, \
+ { "no-bit-align", MASK_NO_BITFIELD_TYPE, \
+ N_("Don't align to the base type of the bit-field") }, \
+ { "strict-align", MASK_STRICT_ALIGN, \
+ N_("Don't assume that unaligned accesses are handled by the system") }, \
+ { "no-strict-align", -MASK_STRICT_ALIGN, \
+ N_("Assume that unaligned accesses are handled by the system") }, \
+ { "relocatable", MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC, \
+ N_("Produce code relocatable at runtime") }, \
+ { "no-relocatable", -MASK_RELOCATABLE, \
+ N_("Don't produce code relocatable at runtime") }, \
+ { "relocatable-lib", MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC, \
+ N_("Produce code relocatable at runtime") }, \
+ { "no-relocatable-lib", -MASK_RELOCATABLE, \
+ N_("Don't produce code relocatable at runtime") }, \
+ { "little-endian", MASK_LITTLE_ENDIAN, \
+ N_("Produce little endian code") }, \
+ { "little", MASK_LITTLE_ENDIAN, \
+ N_("Produce little endian code") }, \
+ { "big-endian", -MASK_LITTLE_ENDIAN, \
+ N_("Produce big endian code") }, \
+ { "big", -MASK_LITTLE_ENDIAN, \
+ N_("Produce big endian code") }, \
+ { "no-toc", 0, N_("no description yet") }, \
+ { "toc", MASK_MINIMAL_TOC, N_("no description yet") }, \
+ { "full-toc", MASK_MINIMAL_TOC, N_("no description yet") }, \
+ { "prototype", MASK_PROTOTYPE, N_("no description yet") }, \
+ { "no-prototype", -MASK_PROTOTYPE, N_("no description yet") }, \
+ { "no-traceback", 0, N_("no description yet") }, \
+ { "eabi", MASK_EABI, N_("Use EABI") }, \
+ { "no-eabi", -MASK_EABI, N_("Don't use EABI") }, \
+ { "bit-word", -MASK_NO_BITFIELD_WORD, "" }, \
+ { "no-bit-word", MASK_NO_BITFIELD_WORD, \
+ N_("Do not allow bit-fields to cross word boundaries") }, \
+ { "regnames", MASK_REGNAMES, \
+ N_("Use alternate register names") }, \
+ { "no-regnames", -MASK_REGNAMES, \
+ N_("Don't use alternate register names") }, \
+ { "sdata", 0, N_("no description yet") }, \
+ { "no-sdata", 0, N_("no description yet") }, \
+ { "sim", 0, \
+ N_("Link with libsim.a, libc.a and sim-crt0.o") }, \
+ { "ads", 0, \
+ N_("Link with libads.a, libc.a and crt0.o") }, \
+ { "yellowknife", 0, \
+ N_("Link with libyk.a, libc.a and crt0.o") }, \
+ { "mvme", 0, \
+ N_("Link with libmvme.a, libc.a and crt0.o") }, \
+ { "emb", 0, \
+ N_("Set the PPC_EMB bit in the ELF flags header") }, \
+ { "vxworks", 0, N_("no description yet") }, \
+ { "shlib", 0, N_("no description yet") }, \
+ EXTRA_SUBTARGET_SWITCHES \
+ { "newlib", 0, N_("no description yet") },
+
+/* This is meant to be redefined in the host dependent files. */
+#define EXTRA_SUBTARGET_SWITCHES
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ The macro SUBTARGET_OVERRIDE_OPTIONS is provided for subtargets, to
+ get control. */
+
+#define SUBTARGET_OVERRIDE_OPTIONS \
+do { \
+ if (!g_switch_set) \
+ g_switch_value = SDATA_DEFAULT_SIZE; \
+ \
+ if (!strcmp (rs6000_abi_name, "sysv")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "sysv-noeabi")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags &= ~ MASK_EABI; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "sysv-eabi") \
+ || !strcmp (rs6000_abi_name, "eabi")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags |= MASK_EABI; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "aix")) \
+ { \
+ rs6000_current_abi = ABI_AIX_NODESC; \
+ target_flags |= MASK_EABI; \
+ } \
+ else if (!strcmp (rs6000_abi_name, "aixdesc")) \
+ rs6000_current_abi = ABI_AIX; \
+ else if (!strcmp (rs6000_abi_name, "freebsd")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "linux")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "netbsd")) \
+ rs6000_current_abi = ABI_V4; \
+ else if (!strcmp (rs6000_abi_name, "i960-old")) \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ target_flags |= (MASK_LITTLE_ENDIAN | MASK_EABI \
+ | MASK_NO_BITFIELD_WORD); \
+ target_flags &= ~MASK_STRICT_ALIGN; \
+ } \
+ else \
+ { \
+ rs6000_current_abi = ABI_V4; \
+ error ("bad value for -mcall-%s", rs6000_abi_name); \
+ } \
+ \
+ if (rs6000_sdata_name) \
+ { \
+ if (!strcmp (rs6000_sdata_name, "none")) \
+ rs6000_sdata = SDATA_NONE; \
+ else if (!strcmp (rs6000_sdata_name, "data")) \
+ rs6000_sdata = SDATA_DATA; \
+ else if (!strcmp (rs6000_sdata_name, "default")) \
+ rs6000_sdata = (TARGET_EABI) ? SDATA_EABI : SDATA_SYSV; \
+ else if (!strcmp (rs6000_sdata_name, "sysv")) \
+ rs6000_sdata = SDATA_SYSV; \
+ else if (!strcmp (rs6000_sdata_name, "eabi")) \
+ rs6000_sdata = SDATA_EABI; \
+ else \
+ error ("bad value for -msdata=%s", rs6000_sdata_name); \
+ } \
+ else if (DEFAULT_ABI == ABI_V4) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ rs6000_sdata_name = "data"; \
+ } \
+ else \
+ { \
+ rs6000_sdata = SDATA_NONE; \
+ rs6000_sdata_name = "none"; \
+ } \
+ \
+ if (TARGET_RELOCATABLE && \
+ (rs6000_sdata == SDATA_EABI || rs6000_sdata == SDATA_SYSV)) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ error ("-mrelocatable and -msdata=%s are incompatible", \
+ rs6000_sdata_name); \
+ } \
+ \
+ else if (flag_pic && \
+ (rs6000_sdata == SDATA_EABI || rs6000_sdata == SDATA_SYSV)) \
+ { \
+ rs6000_sdata = SDATA_DATA; \
+ error ("-f%s and -msdata=%s are incompatible", \
+ (flag_pic > 1) ? "PIC" : "pic", \
+ rs6000_sdata_name); \
+ } \
+ \
+ if (rs6000_sdata != SDATA_NONE && DEFAULT_ABI != ABI_V4) \
+ { \
+ rs6000_sdata = SDATA_NONE; \
+ error ("-msdata=%s and -mcall-%s are incompatible", \
+ rs6000_sdata_name, rs6000_abi_name); \
+ } \
+ \
+ if (TARGET_RELOCATABLE && !TARGET_MINIMAL_TOC) \
+ { \
+ target_flags |= MASK_MINIMAL_TOC; \
+ error ("-mrelocatable and -mno-minimal-toc are incompatible"); \
+ } \
+ \
+ if (TARGET_RELOCATABLE && rs6000_current_abi == ABI_AIX) \
+ { \
+ target_flags &= ~MASK_RELOCATABLE; \
+ error ("-mrelocatable and -mcall-%s are incompatible", \
+ rs6000_abi_name); \
+ } \
+ \
+ if (flag_pic > 1 && rs6000_current_abi == ABI_AIX) \
+ { \
+ flag_pic = 0; \
+ error ("-fPIC and -mcall-%s are incompatible", \
+ rs6000_abi_name); \
+ } \
+ \
+ if (rs6000_current_abi == ABI_AIX && TARGET_LITTLE_ENDIAN) \
+ { \
+ target_flags &= ~MASK_LITTLE_ENDIAN; \
+ error ("-mcall-aixdesc must be big endian"); \
+ } \
+ \
+ /* Treat -fPIC the same as -mrelocatable. */ \
+ if (flag_pic > 1) \
+ target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC | MASK_NO_FP_IN_TOC; \
+ \
+ else if (TARGET_RELOCATABLE) \
+ flag_pic = 2; \
+ \
+} while (0)
+
+
+/* Override rs6000.h definition. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS)
+
+/* Override rs6000.h definition. */
+#undef PROCESSOR_DEFAULT
+#define PROCESSOR_DEFAULT PROCESSOR_PPC750
+
+#define FIXED_R2 1
+/* System V.4 uses register 13 as a pointer to the small data area,
+ so it is not available to the normal user. */
+#define FIXED_R13 1
+
+/* Size of the V.4 varargs area if needed. */
+/* Override rs6000.h definition. */
+#undef RS6000_VARARGS_AREA
+#define RS6000_VARARGS_AREA ((cfun->machine->sysv_varargs_p) ? RS6000_VARARGS_SIZE : 0)
+
+/* Override default big endianism definitions in rs6000.h. */
+#undef BYTES_BIG_ENDIAN
+#undef WORDS_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN)
+#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN)
+
+/* Define this to set the endianness to use in libgcc2.c, which can
+ not depend on target_flags. */
+#if !defined(_LITTLE_ENDIAN) && !defined(__sun__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#endif
+
+/* Define cutoff for using external functions to save floating point.
+ Currently on V.4, always use inline stores. */
+#define FP_SAVE_INLINE(FIRST_REG) ((FIRST_REG) < 64)
+
+/* Put jump tables in read-only memory, rather than in .text. */
+#define JUMP_TABLES_IN_TEXT_SECTION 0
+
+/* Prefix and suffix to use to saving floating point. */
+#define SAVE_FP_PREFIX "_savefpr_"
+#define SAVE_FP_SUFFIX "_l"
+
+/* Prefix and suffix to use to restoring floating point. */
+#define RESTORE_FP_PREFIX "_restfpr_"
+#define RESTORE_FP_SUFFIX "_l"
+
+/* Type used for ptrdiff_t, as a string used in a declaration. */
+#define PTRDIFF_TYPE "int"
+
+/* Type used for wchar_t, as a string used in a declaration. */
+/* Override svr4.h definition. */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+/* Width of wchar_t in bits. */
+/* Override svr4.h definition. */
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Make int foo : 8 not cause structures to be aligned to an int boundary. */
+/* Override elfos.h definition. */
+#undef PCC_BITFIELD_TYPE_MATTERS
+#define PCC_BITFIELD_TYPE_MATTERS (TARGET_BITFIELD_TYPE)
+
+#undef BITFIELD_NBYTES_LIMITED
+#define BITFIELD_NBYTES_LIMITED (TARGET_NO_BITFIELD_WORD)
+
+/* Define this macro to be the value 1 if instructions will fail to
+ work if given data not on the nominal alignment. If instructions
+ will merely go slower in that case, define this macro as 0. */
+#undef STRICT_ALIGNMENT
+#define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN)
+
+/* Alignment in bits of the stack boundary. Note, in order to allow building
+ one set of libraries with -mno-eabi instead of eabi libraries and non-eabi
+ versions, just use 64 as the stack boundary. */
+#undef STACK_BOUNDARY
+#define STACK_BOUNDARY (TARGET_ALTIVEC_ABI ? 128 : 64)
+
+/* Real stack boundary as mandated by the appropriate ABI. */
+#define ABI_STACK_BOUNDARY ((TARGET_EABI && !TARGET_ALTIVEC_ABI) ? 64 : 128)
+
+/* No data type wants to be aligned rounder than this. */
+#undef BIGGEST_ALIGNMENT
+#define BIGGEST_ALIGNMENT (TARGET_EABI ? 64 : 128)
+
+/* An expression for the alignment of a structure field FIELD if the
+ alignment computed in the usual way is COMPUTED. */
+#define ADJUST_FIELD_ALIGN(FIELD, COMPUTED) \
+ ((TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (FIELD)) == VECTOR_TYPE) \
+ ? 128 : COMPUTED)
+
+/* Define this macro as an expression for the alignment of a type
+ (given by TYPE as a tree node) if the alignment computed in the
+ usual way is COMPUTED and the alignment explicitly specified was
+ SPECIFIED. */
+#define ROUND_TYPE_ALIGN(TYPE, COMPUTED, SPECIFIED) \
+ ((TARGET_ALTIVEC && TREE_CODE (TYPE) == VECTOR_TYPE) \
+ ? 128 : MAX (COMPUTED, SPECIFIED))
+
+#undef BIGGEST_FIELD_ALIGNMENT
+#undef ADJUST_FIELD_ALIGN
+
+/* Use ELF style section commands. */
+
+#define TEXT_SECTION_ASM_OP "\t.section\t\".text\""
+
+#define DATA_SECTION_ASM_OP "\t.section\t\".data\""
+
+#define BSS_SECTION_ASM_OP "\t.section\t\".bss\""
+
+/* Override elfos.h definition. */
+#undef INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP "\t.section\t\".init\",\"ax\""
+
+/* Override elfos.h definition. */
+#undef FINI_SECTION_ASM_OP
+#define FINI_SECTION_ASM_OP "\t.section\t\".fini\",\"ax\""
+
+#define TOC_SECTION_ASM_OP "\t.section\t\".got\",\"aw\""
+
+/* Put PC relative got entries in .got2. */
+#define MINIMAL_TOC_SECTION_ASM_OP \
+ ((TARGET_RELOCATABLE || flag_pic) ? "\t.section\t\".got2\",\"aw\"" : "\t.section\t\".got1\",\"aw\"")
+
+#define SDATA_SECTION_ASM_OP "\t.section\t\".sdata\",\"aw\""
+#define SDATA2_SECTION_ASM_OP "\t.section\t\".sdata2\",\"a\""
+#define SBSS_SECTION_ASM_OP "\t.section\t\".sbss\",\"aw\",@nobits"
+
+/* Besides the usual ELF sections, we need a toc section. */
+/* Override elfos.h definition. */
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_toc, in_sdata, in_sdata2, in_sbss, in_init, in_fini
+
+/* Override elfos.h definition. */
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ TOC_SECTION_FUNCTION \
+ SDATA_SECTION_FUNCTION \
+ SDATA2_SECTION_FUNCTION \
+ SBSS_SECTION_FUNCTION \
+ INIT_SECTION_FUNCTION \
+ FINI_SECTION_FUNCTION
+
+#define TOC_SECTION_FUNCTION \
+void \
+toc_section () \
+{ \
+ if (in_section != in_toc) \
+ { \
+ in_section = in_toc; \
+ if (DEFAULT_ABI == ABI_AIX \
+ && TARGET_MINIMAL_TOC \
+ && !TARGET_RELOCATABLE) \
+ { \
+ if (! toc_initialized) \
+ { \
+ toc_initialized = 1; \
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP); \
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LCTOC", 0); \
+ fprintf (asm_out_file, "\t.tc "); \
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],"); \
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
+ fprintf (asm_out_file, "\n"); \
+ \
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
+ fprintf (asm_out_file, " = .+32768\n"); \
+ } \
+ else \
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
+ } \
+ else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE) \
+ fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP); \
+ else \
+ { \
+ fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
+ if (! toc_initialized) \
+ { \
+ ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1"); \
+ fprintf (asm_out_file, " = .+32768\n"); \
+ toc_initialized = 1; \
+ } \
+ } \
+ } \
+} \
+ \
+extern int in_toc_section PARAMS ((void)); \
+int in_toc_section () \
+{ \
+ return in_section == in_toc; \
+}
+
+#define SDATA_SECTION_FUNCTION \
+void \
+sdata_section () \
+{ \
+ if (in_section != in_sdata) \
+ { \
+ in_section = in_sdata; \
+ fprintf (asm_out_file, "%s\n", SDATA_SECTION_ASM_OP); \
+ } \
+}
+
+#define SDATA2_SECTION_FUNCTION \
+void \
+sdata2_section () \
+{ \
+ if (in_section != in_sdata2) \
+ { \
+ in_section = in_sdata2; \
+ fprintf (asm_out_file, "%s\n", SDATA2_SECTION_ASM_OP); \
+ } \
+}
+
+#define SBSS_SECTION_FUNCTION \
+void \
+sbss_section () \
+{ \
+ if (in_section != in_sbss) \
+ { \
+ in_section = in_sbss; \
+ fprintf (asm_out_file, "%s\n", SBSS_SECTION_ASM_OP); \
+ } \
+}
+
+#define INIT_SECTION_FUNCTION \
+void \
+init_section () \
+{ \
+ if (in_section != in_init) \
+ { \
+ in_section = in_init; \
+ fprintf (asm_out_file, "%s\n", INIT_SECTION_ASM_OP); \
+ } \
+}
+
+#define FINI_SECTION_FUNCTION \
+void \
+fini_section () \
+{ \
+ if (in_section != in_fini) \
+ { \
+ in_section = in_fini; \
+ fprintf (asm_out_file, "%s\n", FINI_SECTION_ASM_OP); \
+ } \
+}
+
+/* A C statement or statements to switch to the appropriate section
+ for output of RTX in mode MODE. You can assume that RTX is some
+ kind of constant in RTL. The argument MODE is redundant except in
+ the case of a `const_int' rtx. Select the section by calling
+ `text_section' or one of the alternatives for other sections.
+
+ Do not define this macro if you put all constants in the read-only
+ data section. */
+
+/* Override elfos.h definition. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE, X, ALIGN) rs6000_select_rtx_section (MODE, X)
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+
+/* Override elfos.h definition. */
+#undef SELECT_SECTION
+#define SELECT_SECTION(DECL, RELOC, ALIGN) rs6000_select_section (DECL, RELOC)
+
+/* A C statement to build up a unique section name, expressed as a
+ STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
+ RELOC indicates whether the initial value of EXP requires
+ link-time relocations. If you do not define this macro, GCC will use
+ the symbol name prefixed by `.' as the section name. Note - this
+ macro can now be called for uninitialized data items as well as
+ initialised data and functions. */
+
+/* Override elfos.h definition. */
+#undef UNIQUE_SECTION
+#define UNIQUE_SECTION(DECL, RELOC) rs6000_unique_section (DECL, RELOC)
+
+/* Return non-zero if this entry is to be written into the constant pool
+ in a special way. We do so if this is a SYMBOL_REF, LABEL_REF or a CONST
+ containing one of them. If -mfp-in-toc (the default), we also do
+ this for floating-point constants. We actually can only do this
+ if the FP formats of the target and host machines are the same, but
+ we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h.
+
+ Unlike AIX, we don't key off of -mminimal-toc, but instead do not
+ allow floating point constants in the TOC if -mrelocatable. */
+
+#undef ASM_OUTPUT_SPECIAL_POOL_ENTRY_P
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (!TARGET_NO_FP_IN_TOC \
+ && !TARGET_RELOCATABLE \
+ && GET_CODE (X) == CONST_DOUBLE \
+ && GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && BITS_PER_WORD == HOST_BITS_PER_INT)))
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+extern int rs6000_pic_labelno;
+
+/* Override elfos.h definition. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ const char *const init_ptr = (TARGET_64BIT) ? ".quad" : ".long"; \
+ \
+ if (TARGET_RELOCATABLE \
+ && (get_pool_size () != 0 || current_function_profile) \
+ && uses_TOC()) \
+ { \
+ char buf[256]; \
+ \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "LCL", rs6000_pic_labelno); \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1); \
+ fprintf (FILE, "\t%s ", init_ptr); \
+ assemble_name (FILE, buf); \
+ putc ('-', FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno); \
+ assemble_name (FILE, buf); \
+ putc ('\n', FILE); \
+ } \
+ \
+ fprintf (FILE, "%s", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ \
+ if (DEFAULT_ABI == ABI_AIX) \
+ { \
+ const char *desc_name, *orig_name; \
+ \
+ STRIP_NAME_ENCODING (orig_name, NAME); \
+ desc_name = orig_name; \
+ while (*desc_name == '.') \
+ desc_name++; \
+ \
+ if (TREE_PUBLIC (DECL)) \
+ fprintf (FILE, "\t.globl %s\n", desc_name); \
+ \
+ fprintf (FILE, "%s\n", MINIMAL_TOC_SECTION_ASM_OP); \
+ fprintf (FILE, "%s:\n", desc_name); \
+ fprintf (FILE, "\t%s %s\n", init_ptr, orig_name); \
+ fprintf (FILE, "\t%s _GLOBAL_OFFSET_TABLE_\n", init_ptr); \
+ if (DEFAULT_ABI == ABI_AIX) \
+ fprintf (FILE, "\t%s 0\n", init_ptr); \
+ fprintf (FILE, "\t.previous\n"); \
+ } \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } while (0)
+
+/* A C compound statement that outputs the assembler code for a thunk function,
+ used to implement C++ virtual function calls with multiple inheritance. The
+ thunk acts as a wrapper around a virtual function, adjusting the implicit
+ object parameter before handing control off to the real function.
+
+ First, emit code to add the integer DELTA to the location that contains the
+ incoming first argument. Assume that this argument contains a pointer, and
+ is the one used to pass the this' pointer in C++. This is the incoming
+ argument *before* the function prologue, e.g. %o0' on a sparc. The
+ addition must preserve the values of all other incoming arguments.
+
+ After the addition, emit code to jump to FUNCTION, which is a
+ FUNCTION_DECL'. This is a direct pure jump, not a call, and does not touch
+ the return address. Hence returning from FUNCTION will return to whoever
+ called the current thunk'.
+
+ The effect must be as if FUNCTION had been called directly with the adjusted
+ first argument. This macro is responsible for emitting all of the code for
+ a thunk function; FUNCTION_PROLOGUE' and FUNCTION_EPILOGUE' are not
+ invoked.
+
+ The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already been
+ extracted from it.) It might possibly be useful on some targets, but
+ probably not.
+
+ If you do not define this macro, the target-independent code in the C++
+ frontend will generate a less efficient heavyweight thunk that calls
+ FUNCTION instead of jumping to it. The generic approach does not support
+ varargs. */
+
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+ output_mi_thunk (FILE, THUNK_FNDECL, DELTA, FUNCTION)
+
+/* The USER_LABEL_PREFIX stuff is affected by the -fleading-underscore
+ flag. The LOCAL_LABEL_PREFIX variable is used by dbxelf.h. */
+
+#define LOCAL_LABEL_PREFIX "."
+#define USER_LABEL_PREFIX ""
+
+/* svr4.h overrides ASM_OUTPUT_INTERNAL_LABEL. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ asm_fprintf (FILE, "%L%s", PREFIX)
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ (assemble_name (FILE, NAME), fputs (":\n", FILE))
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.globl ", FILE); \
+ assemble_name (FILE, NAME); putc ('\n', FILE);} while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP "\t.local\t"
+
+#define LCOMM_ASM_OP "\t.lcomm\t"
+
+/* Override elfos.h definition. */
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ if (rs6000_sdata != SDATA_NONE && (SIZE) > 0 \
+ && (SIZE) <= g_switch_value) \
+ { \
+ sbss_section (); \
+ ASM_OUTPUT_ALIGN (FILE, exact_log2 (ALIGN / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ ASM_OUTPUT_SKIP (FILE, SIZE); \
+ if (!flag_inhibit_size_directive && (SIZE) > 0) \
+ { \
+ fprintf (FILE, "%s", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", SIZE); \
+ } \
+ } \
+ else \
+ { \
+ fprintf (FILE, "%s", LCOMM_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ } \
+} while (0)
+
+/* Describe how to emit uninitialized external linkage items. */
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ ASM_GLOBALIZE_LABEL (FILE, NAME); \
+ ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+/* Switch Recognition by gcc.c. Add -G xx support. */
+
+/* Override svr4.h definition. */
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ((CHAR) == 'D' || (CHAR) == 'U' || (CHAR) == 'o' \
+ || (CHAR) == 'e' || (CHAR) == 'T' || (CHAR) == 'u' \
+ || (CHAR) == 'I' || (CHAR) == 'm' || (CHAR) == 'x' \
+ || (CHAR) == 'L' || (CHAR) == 'A' || (CHAR) == 'V' \
+ || (CHAR) == 'B' || (CHAR) == 'b' || (CHAR) == 'G')
+
+/* Output .file. */
+/* Override elfos.h definition. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+do { \
+ output_file_directive ((FILE), main_input_filename); \
+ rs6000_file_start (FILE, TARGET_CPU_DEFAULT); \
+} while (0)
+
+
+extern int fixuplabelno;
+
+/* Handle constructors specially for -mrelocatable. */
+#define TARGET_ASM_CONSTRUCTOR rs6000_elf_asm_out_constructor
+#define TARGET_ASM_DESTRUCTOR rs6000_elf_asm_out_destructor
+
+/* This is the end of what might become sysv4.h. */
+
+/* Use DWARF 2 debugging information by default. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Historically we have also supported stabs debugging. */
+#define DBX_DEBUGGING_INFO
+
+/* If we are referencing a function that is static or is known to be
+ in this file, make the SYMBOL_REF special. We can use this to indicate
+ that we can branch to this function without emitting a no-op after the
+ call. For real AIX calling sequences, we also replace the
+ function name with the real name (1 or 2 leading .'s), rather than
+ the function descriptor name. This saves a lot of overriding code
+ to read the prefixes. */
+
+#undef ENCODE_SECTION_INFO
+#define ENCODE_SECTION_INFO(DECL) rs6000_encode_section_info (DECL)
+
+/* The ELF version doesn't encode [DS] or whatever at the end of symbols. */
+
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ assemble_name (FILE, NAME)
+
+/* This macro gets just the user-specified name
+ out of the string in a SYMBOL_REF. Discard
+ a leading * or @. */
+#define STRIP_NAME_ENCODING(VAR,SYMBOL_NAME) \
+do { \
+ const char *_name = SYMBOL_NAME; \
+ while (*_name == '*' || *_name == '@') \
+ _name++; \
+ (VAR) = _name; \
+} while (0)
+
+/* This is how to output a reference to a user-level label named NAME.
+ `assemble_name' uses this. */
+
+/* Override elfos.h definition. */
+#undef ASM_OUTPUT_LABELREF
+#define ASM_OUTPUT_LABELREF(FILE,NAME) \
+do { \
+ const char *_name = NAME; \
+ if (*_name == '@') \
+ _name++; \
+ \
+ if (*_name == '*') \
+ fprintf (FILE, "%s", _name + 1); \
+ else \
+ asm_fprintf (FILE, "%U%s", _name); \
+} while (0)
+
+/* But, to make this work, we have to output the stabs for the function
+ name *first*... */
+
+#define DBX_FUNCTION_FIRST
+
+/* This is the end of what might become sysv4dbx.h. */
+
+#ifndef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (PowerPC System V.4)");
+#endif
+
+#ifndef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DPPC -Dunix -D__svr4__ -Asystem=unix -Asystem=svr4 -Acpu=powerpc -Amachine=powerpc"
+#endif
+
+/* Pass various options to the assembler. */
+/* Override svr4.h definition. */
+#undef ASM_SPEC
+#define ASM_SPEC "%(asm_cpu) \
+%{.s: %{mregnames} %{mno-regnames}} %{.S: %{mregnames} %{mno-regnames}} \
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+%{mrelocatable} %{mrelocatable-lib} %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{memb} %{!memb: %{msdata: -memb} %{msdata=eabi: -memb}} \
+%{mlittle} %{mlittle-endian} %{mbig} %{mbig-endian} \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-freebsd: -mbig} \
+ %{mcall-i960-old: -mlittle} \
+ %{mcall-linux: -mbig} \
+ %{mcall-netbsd: -mbig} \
+}}}}"
+
+#define CC1_ENDIAN_BIG_SPEC ""
+
+#define CC1_ENDIAN_LITTLE_SPEC "\
+%{!mstrict-align: %{!mno-strict-align: \
+ %{!mcall-i960-old: \
+ -mstrict-align \
+ } \
+}}"
+
+#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_big_spec)"
+
+/* Pass -G xxx to the compiler and set correct endian mode. */
+#define CC1_SPEC "%{G*} \
+%{mlittle: %(cc1_endian_little)} %{!mlittle: %{mlittle-endian: %(cc1_endian_little)}} \
+%{mbig: %(cc1_endian_big)} %{!mbig: %{mbig-endian: %(cc1_endian_big)}} \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-aixdesc: -mbig %(cc1_endian_big) } \
+ %{mcall-freebsd: -mbig %(cc1_endian_big) } \
+ %{mcall-i960-old: -mlittle %(cc1_endian_little) } \
+ %{mcall-linux: -mbig %(cc1_endian_big) } \
+ %{mcall-netbsd: -mbig %(cc1_endian_big) } \
+ %{!mcall-aixdesc: %{!mcall-freebsd: %{!mcall-i960-old: %{!mcall-linux: %{!mcall-netbsd: \
+ %(cc1_endian_default) \
+ }}}}} \
+}}}} \
+%{mno-sdata: -msdata=none } \
+%{meabi: %{!mcall-*: -mcall-sysv }} \
+%{!meabi: %{!mno-eabi: \
+ %{mrelocatable: -meabi } \
+ %{mcall-freebsd: -mno-eabi } \
+ %{mcall-i960-old: -meabi } \
+ %{mcall-linux: -mno-eabi } \
+ %{mcall-netbsd: -mno-eabi }}} \
+%{msdata: -msdata=default} \
+%{mno-sdata: -msdata=none} \
+%{profile: -p}"
+
+/* Don't put -Y P,<path> for cross compilers. */
+#ifndef CROSS_COMPILE
+#define LINK_PATH_SPEC "\
+%{!R*:%{L*:-R %*}} \
+%{!nostdlib: %{!YP,*: \
+ %{compat-bsd: \
+ %{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}} \
+ %{!R*: %{!L*: -R /usr/ucblib}} \
+ %{!compat-bsd: \
+ %{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ccs/lib:/usr/lib}}}}"
+
+#else
+#define LINK_PATH_SPEC ""
+#endif
+
+/* Default starting address if specified. */
+#define LINK_START_SPEC "\
+%{mads: %(link_start_ads) } \
+%{myellowknife: %(link_start_yellowknife) } \
+%{mmvme: %(link_start_mvme) } \
+%{msim: %(link_start_sim) } \
+%{mcall-freebsd: %(link_start_freebsd) } \
+%{mcall-linux: %(link_start_linux) } \
+%{mcall-netbsd: %(link_start_netbsd) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-linux: \
+ %{!mcall-netbsd: %{!mcall-freebsd: %(link_start_default) }}}}}}}"
+
+#define LINK_START_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{h*} %{v:-V} %{!msdata=none:%{G*}} %{msdata=none:-G0} \
+%{YP,*} %{R*} \
+%{Qy:} %{!Qn:-Qy} \
+%(link_shlib) \
+%{!Wl,-T*: %{!T*: %(link_start) }} \
+%(link_target) \
+%(link_os)"
+
+/* For now, turn off shared libraries by default. */
+#ifndef SHARED_LIB_SUPPORT
+#define NO_SHARED_LIB_SUPPORT
+#endif
+
+#ifndef NO_SHARED_LIB_SUPPORT
+/* Shared libraries are default. */
+#define LINK_SHLIB_SPEC "\
+%{!static: %(link_path) %{!R*:%{L*:-R %*}}} \
+%{mshlib: } \
+%{static:-dn -Bstatic} \
+%{shared:-G -dy -z text} \
+%{symbolic:-Bsymbolic -G -dy -z text}"
+
+#else
+/* Shared libraries are not default. */
+#define LINK_SHLIB_SPEC "\
+%{mshlib: %(link_path) } \
+%{!mshlib: %{!shared: %{!symbolic: -dn -Bstatic}}} \
+%{static: } \
+%{shared:-G -dy -z text %(link_path) } \
+%{symbolic:-Bsymbolic -G -dy -z text %(link_path) }"
+#endif
+
+/* Override the default target of the linker. */
+#define LINK_TARGET_SPEC "\
+%{mlittle: --oformat elf32-powerpcle } %{mlittle-endian: --oformat elf32-powerpcle } \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-i960-old: --oformat elf32-powerpcle} \
+ }}}}"
+
+/* Any specific OS flags. */
+#define LINK_OS_SPEC "\
+%{mads: %(link_os_ads) } \
+%{myellowknife: %(link_os_yellowknife) } \
+%{mmvme: %(link_os_mvme) } \
+%{msim: %(link_os_sim) } \
+%{mcall-freebsd: %(link_os_freebsd) } \
+%{mcall-linux: %(link_os_linux) } \
+%{mcall-netbsd: %(link_os_netbsd) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(link_os_default) }}}}}}}"
+
+#define LINK_OS_DEFAULT_SPEC ""
+
+#define CPP_SYSV_SPEC \
+"%{mrelocatable*: -D_RELOCATABLE} \
+%{fpic: -D__PIC__=1 -D__pic__=1} \
+%{!fpic: %{fPIC: -D__PIC__=2 -D__pic__=2}} \
+%{mlong-double-128: -D__LONG_DOUBLE_128__=1} \
+%{!mlong-double-64: %(cpp_longdouble_default)} \
+%{mcall-sysv: -D_CALL_SYSV} \
+%{mcall-aix: -D_CALL_AIX} %{mcall-aixdesc: -D_CALL_AIX -D_CALL_AIXDESC} \
+%{!mcall-sysv: %{!mcall-aix: %{!mcall-aixdesc: %(cpp_sysv_default) }}} \
+%{msoft-float: -D_SOFT_FLOAT} \
+%{!msoft-float: %{!mhard-float: \
+ %{mcpu=401: -D_SOFT_FLOAT} \
+ %{mcpu=403: -D_SOFT_FLOAT} \
+ %{mcpu=405: -D_SOFT_FLOAT} \
+ %{mcpu=ec603e: -D_SOFT_FLOAT} \
+ %{mcpu=801: -D_SOFT_FLOAT} \
+ %{mcpu=821: -D_SOFT_FLOAT} \
+ %{mcpu=823: -D_SOFT_FLOAT} \
+ %{mcpu=860: -D_SOFT_FLOAT} \
+ %{!mcpu*: %(cpp_float_default) }}}"
+
+/* Whether floating point is disabled by default. */
+#define CPP_FLOAT_DEFAULT_SPEC ""
+
+/* Whether 'long double' is 128 bits by default. */
+#define CPP_LONGDOUBLE_DEFAULT_SPEC ""
+
+#define CPP_SYSV_DEFAULT_SPEC "-D_CALL_SYSV"
+
+#define CPP_ENDIAN_BIG_SPEC "-D_BIG_ENDIAN -D__BIG_ENDIAN__ -Amachine=bigendian"
+
+#define CPP_ENDIAN_LITTLE_SPEC "-D_LITTLE_ENDIAN -D__LITTLE_ENDIAN__ -Amachine=littleendian"
+
+#define CPP_ENDIAN_SPEC \
+"%{mlittle: %(cpp_endian_little) } \
+%{mlittle-endian: %(cpp_endian_little) } \
+%{mbig: %(cpp_endian_big) } \
+%{mbig-endian: %(cpp_endian_big) } \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-freebsd: %(cpp_endian_big) } \
+ %{mcall-linux: %(cpp_endian_big) } \
+ %{mcall-netbsd: %(cpp_endian_big) } \
+ %{mcall-i960-old: %(cpp_endian_little) } \
+ %{mcall-aixdesc: %(cpp_endian_big) } \
+ %{!mcall-linux: %{!mcall-freebsd: %{!mcall-netbsd: %{!mcall-aixdesc: %(cpp_endian_default) }}}}}}}}"
+
+#define CPP_ENDIAN_DEFAULT_SPEC "%(cpp_endian_big)"
+
+/* Override rs6000.h definition. */
+#undef CPP_SPEC
+#define CPP_SPEC "%{posix: -D_POSIX_SOURCE} %(cpp_sysv) %(cpp_endian) %(cpp_cpu) \
+%{mads: %(cpp_os_ads) } \
+%{myellowknife: %(cpp_os_yellowknife) } \
+%{mmvme: %(cpp_os_mvme) } \
+%{msim: %(cpp_os_sim) } \
+%{mcall-freebsd: %(cpp_os_freebsd) } \
+%{mcall-linux: %(cpp_os_linux) } \
+%{mcall-netbsd: %(cpp_os_netbsd) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(cpp_os_default) }}}}}}}"
+
+#define CPP_OS_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+%{mads: %(startfile_ads) } \
+%{myellowknife: %(startfile_yellowknife) } \
+%{mmvme: %(startfile_mvme) } \
+%{msim: %(startfile_sim) } \
+%{mcall-freebsd: %(startfile_freebsd) } \
+%{mcall-linux: %(startfile_linux) } \
+%{mcall-netbsd: %(startfile_netbsd) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(startfile_default) }}}}}}}"
+
+#define STARTFILE_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef LIB_SPEC
+#define LIB_SPEC "\
+%{mads: %(lib_ads) } \
+%{myellowknife: %(lib_yellowknife) } \
+%{mmvme: %(lib_mvme) } \
+%{msim: %(lib_sim) } \
+%{mcall-freebsd: %(lib_freebsd) } \
+%{mcall-linux: %(lib_linux) } \
+%{mcall-netbsd: %(lib_netbsd) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %(lib_default) }}}}}}}"
+
+#define LIB_DEFAULT_SPEC ""
+
+/* Override svr4.h definition. */
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "\
+%{mads: %(endfile_ads)} \
+%{myellowknife: %(endfile_yellowknife)} \
+%{mmvme: %(endfile_mvme)} \
+%{msim: %(endfile_sim)} \
+%{mcall-freebsd: %(endfile_freebsd) } \
+%{mcall-linux: %(endfile_linux) } \
+%{mcall-netbsd: %(endfile_netbsd) } \
+%{mvxworks: %(endfile_vxworks) } \
+%{!mads: %{!myellowknife: %{!mmvme: %{!msim: %{!mcall-freebsd: %{!mcall-linux: %{!mcall-netbsd: %{!mvxworks: %(endfile_default) }}}}}}}}"
+
+#define ENDFILE_DEFAULT_SPEC ""
+
+/* Motorola ADS support. */
+#define LIB_ADS_SPEC "--start-group -lads -lc --end-group"
+
+#define STARTFILE_ADS_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_ADS_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_ADS_SPEC "-T ads.ld%s"
+
+#define LINK_OS_ADS_SPEC ""
+
+#define CPP_OS_ADS_SPEC ""
+
+/* Motorola Yellowknife support. */
+#define LIB_YELLOWKNIFE_SPEC "--start-group -lyk -lc --end-group"
+
+#define STARTFILE_YELLOWKNIFE_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_YELLOWKNIFE_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_YELLOWKNIFE_SPEC "-T yellowknife.ld%s"
+
+#define LINK_OS_YELLOWKNIFE_SPEC ""
+
+#define CPP_OS_YELLOWKNIFE_SPEC ""
+
+/* Motorola MVME support. */
+#define LIB_MVME_SPEC "--start-group -lmvme -lc --end-group"
+
+#define STARTFILE_MVME_SPEC "ecrti.o%s crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_MVME_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_MVME_SPEC "-Ttext 0x40000"
+
+#define LINK_OS_MVME_SPEC ""
+
+#define CPP_OS_MVME_SPEC ""
+
+/* PowerPC simulator based on netbsd system calls support. */
+#define LIB_SIM_SPEC "--start-group -lsim -lc --end-group"
+
+#define STARTFILE_SIM_SPEC "ecrti.o%s sim-crt0.o%s crtbegin.o%s"
+
+#define ENDFILE_SIM_SPEC "crtend.o%s ecrtn.o%s"
+
+#define LINK_START_SIM_SPEC ""
+
+#define LINK_OS_SIM_SPEC "-m elf32ppcsim"
+
+#define CPP_OS_SIM_SPEC ""
+
+/* FreeBSD support. */
+
+#define CPP_OS_FREEBSD_SPEC "\
+ -D__PPC__ -D__ppc__ -D__PowerPC__ -D__powerpc__ \
+ -Acpu=powerpc -Amachine=powerpc"
+
+#define STARTFILE_FREEBSD_SPEC FBSD_STARTFILE_SPEC
+#define ENDFILE_FREEBSD_SPEC FBSD_ENDFILE_SPEC
+#define LIB_FREEBSD_SPEC FBSD_LIB_SPEC
+#define LINK_START_FREEBSD_SPEC ""
+
+#define LINK_OS_FREEBSD_SPEC "\
+ %{symbolic:-Bsymbolic}"
+
+/* GNU/Linux support. */
+#ifdef USE_GNULIBC_1
+#define LIB_LINUX_SPEC "%{mnewlib: --start-group -llinux -lc --end-group } \
+%{!mnewlib: -lc }"
+#else
+#define LIB_LINUX_SPEC "%{mnewlib: --start-group -llinux -lc --end-group } \
+%{!mnewlib: %{shared:-lc} %{!shared: %{pthread:-lpthread } \
+%{profile:-lc_p} %{!profile:-lc}}}"
+#endif
+
+#define STARTFILE_LINUX_SPEC "\
+%{!shared: %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}} \
+%{mnewlib: ecrti.o%s} %{!mnewlib: crti.o%s} \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define ENDFILE_LINUX_SPEC "%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+%{mnewlib: ecrtn.o%s} %{!mnewlib: crtn.o%s}"
+
+#define LINK_START_LINUX_SPEC ""
+
+#define LINK_OS_LINUX_SPEC "-m elf32ppclinux %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}}}"
+
+#ifdef USE_GNULIBC_1
+#define CPP_OS_LINUX_SPEC "-D__unix__ -D__linux__ \
+%{!undef: \
+ %{!ansi: \
+ %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
+ %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
+-Asystem=unix -Asystem=posix"
+#else
+#define CPP_OS_LINUX_SPEC "-D__unix__ -D__linux__ \
+%{!undef: \
+ %{!ansi: \
+ %{!std=*:-Dunix -D__unix -Dlinux -D__linux} \
+ %{std=gnu*:-Dunix -D__unix -Dlinux -D__linux}}} \
+-Asystem=unix -Asystem=posix %{pthread:-D_REENTRANT}"
+#endif
+
+/* NetBSD support. */
+#define LIB_NETBSD_SPEC "\
+%{profile:-lgmon -lc_p} %{!profile:-lc}"
+
+#define STARTFILE_NETBSD_SPEC "\
+ncrti.o%s crt0.o%s \
+%{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+#define ENDFILE_NETBSD_SPEC "\
+%{!shared:crtend.o%s} %{shared:crtendS.o%s} \
+ncrtn.o%s"
+
+#define LINK_START_NETBSD_SPEC "\
+"
+
+#define LINK_OS_NETBSD_SPEC "\
+%{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.elf_so}}}"
+
+#define CPP_OS_NETBSD_SPEC "\
+-D__powerpc__ -D__NetBSD__ -D__ELF__ -D__KPRINTF_ATTRIBUTE__"
+
+/* VxWorks support. */
+/* VxWorks does all the library stuff itself. */
+#define LIB_VXWORKS_SPEC ""
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+
+#define STARTFILE_VXWORKS_SPEC ""
+
+#define ENDFILE_VXWORKS_SPEC ""
+
+/* Because it uses ld -r, vxworks has no start/end files, nor starting
+ address. */
+
+#define LINK_START_VXWORKS_SPEC ""
+
+#define LINK_OS_VXWORKS_SPEC "-r"
+
+#define CPP_OS_VXWORKS_SPEC "\
+-DCPU_FAMILY=PPC \
+%{!mcpu*: \
+ %{mpowerpc*: -DCPU=PPC603} \
+ %{!mno-powerpc: -DCPU=PPC603}} \
+%{mcpu=powerpc: -DCPU=PPC603} \
+%{mcpu=401: -DCPU=PPC403} \
+%{mcpu=403: -DCPU=PPC403} \
+%{mcpu=405: -DCPU=PPC405} \
+%{mcpu=601: -DCPU=PPC601} \
+%{mcpu=602: -DCPU=PPC603} \
+%{mcpu=603: -DCPU=PPC603} \
+%{mcpu=603e: -DCPU=PPC603} \
+%{mcpu=ec603e: -DCPU=PPC603} \
+%{mcpu=604: -DCPU=PPC604} \
+%{mcpu=604e: -DCPU=PPC604} \
+%{mcpu=620: -DCPU=PPC604} \
+%{mcpu=740: -DCPU=PPC603} \
+%{mcpu=7450: -DCPU=PPC603} \
+%{mcpu=750: -DCPU=PPC603} \
+%{mcpu=801: -DCPU=PPC603} \
+%{mcpu=821: -DCPU=PPC603} \
+%{mcpu=823: -DCPU=PPC603} \
+%{mcpu=860: -DCPU=PPC603}"
+
+/* Define any extra SPECS that the compiler needs to generate. */
+/* Override rs6000.h definition. */
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+ { "cpp_sysv", CPP_SYSV_SPEC }, \
+ { "cpp_sysv_default", CPP_SYSV_DEFAULT_SPEC }, \
+ { "cpp_endian_default", CPP_ENDIAN_DEFAULT_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "lib_ads", LIB_ADS_SPEC }, \
+ { "lib_yellowknife", LIB_YELLOWKNIFE_SPEC }, \
+ { "lib_mvme", LIB_MVME_SPEC }, \
+ { "lib_sim", LIB_SIM_SPEC }, \
+ { "lib_freebsd", LIB_FREEBSD_SPEC }, \
+ { "lib_linux", LIB_LINUX_SPEC }, \
+ { "lib_netbsd", LIB_NETBSD_SPEC }, \
+ { "lib_vxworks", LIB_VXWORKS_SPEC }, \
+ { "lib_default", LIB_DEFAULT_SPEC }, \
+ { "startfile_ads", STARTFILE_ADS_SPEC }, \
+ { "startfile_yellowknife", STARTFILE_YELLOWKNIFE_SPEC }, \
+ { "startfile_mvme", STARTFILE_MVME_SPEC }, \
+ { "startfile_sim", STARTFILE_SIM_SPEC }, \
+ { "startfile_freebsd", STARTFILE_FREEBSD_SPEC }, \
+ { "startfile_linux", STARTFILE_LINUX_SPEC }, \
+ { "startfile_netbsd", STARTFILE_NETBSD_SPEC }, \
+ { "startfile_vxworks", STARTFILE_VXWORKS_SPEC }, \
+ { "startfile_default", STARTFILE_DEFAULT_SPEC }, \
+ { "endfile_ads", ENDFILE_ADS_SPEC }, \
+ { "endfile_yellowknife", ENDFILE_YELLOWKNIFE_SPEC }, \
+ { "endfile_mvme", ENDFILE_MVME_SPEC }, \
+ { "endfile_sim", ENDFILE_SIM_SPEC }, \
+ { "endfile_freebsd", ENDFILE_FREEBSD_SPEC }, \
+ { "endfile_linux", ENDFILE_LINUX_SPEC }, \
+ { "endfile_netbsd", ENDFILE_NETBSD_SPEC }, \
+ { "endfile_vxworks", ENDFILE_VXWORKS_SPEC }, \
+ { "endfile_default", ENDFILE_DEFAULT_SPEC }, \
+ { "link_path", LINK_PATH_SPEC }, \
+ { "link_shlib", LINK_SHLIB_SPEC }, \
+ { "link_target", LINK_TARGET_SPEC }, \
+ { "link_start", LINK_START_SPEC }, \
+ { "link_start_ads", LINK_START_ADS_SPEC }, \
+ { "link_start_yellowknife", LINK_START_YELLOWKNIFE_SPEC }, \
+ { "link_start_mvme", LINK_START_MVME_SPEC }, \
+ { "link_start_sim", LINK_START_SIM_SPEC }, \
+ { "link_start_freebsd", LINK_START_FREEBSD_SPEC }, \
+ { "link_start_linux", LINK_START_LINUX_SPEC }, \
+ { "link_start_netbsd", LINK_START_NETBSD_SPEC }, \
+ { "link_start_vxworks", LINK_START_VXWORKS_SPEC }, \
+ { "link_start_default", LINK_START_DEFAULT_SPEC }, \
+ { "link_os", LINK_OS_SPEC }, \
+ { "link_os_ads", LINK_OS_ADS_SPEC }, \
+ { "link_os_yellowknife", LINK_OS_YELLOWKNIFE_SPEC }, \
+ { "link_os_mvme", LINK_OS_MVME_SPEC }, \
+ { "link_os_sim", LINK_OS_SIM_SPEC }, \
+ { "link_os_freebsd", LINK_OS_FREEBSD_SPEC }, \
+ { "link_os_linux", LINK_OS_LINUX_SPEC }, \
+ { "link_os_netbsd", LINK_OS_NETBSD_SPEC }, \
+ { "link_os_vxworks", LINK_OS_VXWORKS_SPEC }, \
+ { "link_os_default", LINK_OS_DEFAULT_SPEC }, \
+ { "cc1_endian_big", CC1_ENDIAN_BIG_SPEC }, \
+ { "cc1_endian_little", CC1_ENDIAN_LITTLE_SPEC }, \
+ { "cc1_endian_default", CC1_ENDIAN_DEFAULT_SPEC }, \
+ { "cpp_endian_big", CPP_ENDIAN_BIG_SPEC }, \
+ { "cpp_endian_little", CPP_ENDIAN_LITTLE_SPEC }, \
+ { "cpp_float_default", CPP_FLOAT_DEFAULT_SPEC }, \
+ { "cpp_longdouble_default", CPP_LONGDOUBLE_DEFAULT_SPEC }, \
+ { "cpp_os_ads", CPP_OS_ADS_SPEC }, \
+ { "cpp_os_yellowknife", CPP_OS_YELLOWKNIFE_SPEC }, \
+ { "cpp_os_mvme", CPP_OS_MVME_SPEC }, \
+ { "cpp_os_sim", CPP_OS_SIM_SPEC }, \
+ { "cpp_os_freebsd", CPP_OS_FREEBSD_SPEC }, \
+ { "cpp_os_linux", CPP_OS_LINUX_SPEC }, \
+ { "cpp_os_netbsd", CPP_OS_NETBSD_SPEC }, \
+ { "cpp_os_vxworks", CPP_OS_VXWORKS_SPEC }, \
+ { "cpp_os_default", CPP_OS_DEFAULT_SPEC },
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#define MULTILIB_DEFAULTS { "mbig", "mcall-sysv" }
+
+/* Define this macro if the code for function profiling should come
+ before the function prologue. Normally, the profiling code comes
+ after. */
+#define PROFILE_BEFORE_PROLOGUE 1
+
+/* Function name to call to do profiling. */
+#define RS6000_MCOUNT "_mcount"
+
+/* Define this macro (to a value of 1) if you want to support the
+ Win32 style pragmas #pragma pack(push,<n>)' and #pragma
+ pack(pop)'. The pack(push,<n>) pragma specifies the maximum
+ alignment (in bytes) of fields within a structure, in much the
+ same way as the __aligned__' and __packed__' __attribute__'s
+ do. A pack value of zero resets the behaviour to the default.
+ Successive invocations of this pragma cause the previous values to
+ be stacked, so that invocations of #pragma pack(pop)' will return
+ to the previous value. */
+
+#define HANDLE_PRAGMA_PACK_PUSH_POP 1
+
+/* Define library calls for quad FP operations. These are all part of the
+ PowerPC 32bit ABI. */
+#define ADDTF3_LIBCALL "_q_add"
+#define DIVTF3_LIBCALL "_q_div"
+#define EXTENDDFTF2_LIBCALL "_q_dtoq"
+#define EQTF2_LIBCALL "_q_feq"
+#define GETF2_LIBCALL "_q_fge"
+#define GTTF2_LIBCALL "_q_fgt"
+#define LETF2_LIBCALL "_q_fle"
+#define LTTF2_LIBCALL "_q_flt"
+#define NETF2_LIBCALL "_q_fne"
+#define FLOATSITF2_LIBCALL "_q_itoq"
+#define MULTF3_LIBCALL "_q_mul"
+#define NEGTF2_LIBCALL "_q_neg"
+#define TRUNCTFDF2_LIBCALL "_q_qtod"
+#define FIX_TRUNCTFSI2_LIBCALL "_q_qtoi"
+#define TRUNCTFSF2_LIBCALL "_q_qtos"
+#define FIXUNS_TRUNCTFSI2_LIBCALL "_q_qtou"
+#define SQRTTF_LIBCALL "_q_sqrt"
+#define EXTENDSFTF2_LIBCALL "_q_stoq"
+#define SUBTF3_LIBCALL "_q_sub"
+#define FLOATUNSSITF2_LIBCALL "_q_utoq"
+
+#define INIT_TARGET_OPTABS \
+ do { \
+ if (TARGET_HARD_FLOAT) \
+ { \
+ add_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (ADDTF3_LIBCALL); \
+ sub_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (SUBTF3_LIBCALL); \
+ neg_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (NEGTF2_LIBCALL); \
+ smul_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (MULTF3_LIBCALL); \
+ sdiv_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (DIVTF3_LIBCALL); \
+ eqtf2_libfunc = init_one_libfunc (EQTF2_LIBCALL); \
+ netf2_libfunc = init_one_libfunc (NETF2_LIBCALL); \
+ gttf2_libfunc = init_one_libfunc (GTTF2_LIBCALL); \
+ getf2_libfunc = init_one_libfunc (GETF2_LIBCALL); \
+ lttf2_libfunc = init_one_libfunc (LTTF2_LIBCALL); \
+ letf2_libfunc = init_one_libfunc (LETF2_LIBCALL); \
+ trunctfsf2_libfunc = init_one_libfunc (TRUNCTFSF2_LIBCALL); \
+ trunctfdf2_libfunc = init_one_libfunc (TRUNCTFDF2_LIBCALL); \
+ extendsftf2_libfunc = init_one_libfunc (EXTENDSFTF2_LIBCALL); \
+ extenddftf2_libfunc = init_one_libfunc (EXTENDDFTF2_LIBCALL); \
+ floatsitf_libfunc = init_one_libfunc (FLOATSITF2_LIBCALL); \
+ fixtfsi_libfunc = init_one_libfunc (FIX_TRUNCTFSI2_LIBCALL); \
+ fixunstfsi_libfunc \
+ = init_one_libfunc (FIXUNS_TRUNCTFSI2_LIBCALL); \
+ if (TARGET_PPC_GPOPT || TARGET_POWER2) \
+ sqrt_optab->handlers[(int) TFmode].libfunc \
+ = init_one_libfunc (SQRTTF_LIBCALL); \
+ } \
+ } while (0)
+
+/* Select a format to encode pointers in exception handling data. CODE
+ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is
+ true if the symbol may be affected by dynamic relocations. */
+#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
+ ((flag_pic || TARGET_RELOCATABLE) \
+ ? (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) \
+ : DW_EH_PE_absptr)
+
+#define TARGET_ASM_EXCEPTION_SECTION readonly_data_section
+
+#define DOUBLE_INT_ASM_OP "\t.quad\t"
+
+/* Generate entries in .fixup for relocatable addresses. */
+#define RELOCATABLE_NEEDS_FIXUP
diff --git a/contrib/gcc/config/rs6000/sysv4le.h b/contrib/gcc/config/rs6000/sysv4le.h
new file mode 100644
index 0000000..1c0daea
--- /dev/null
+++ b/contrib/gcc/config/rs6000/sysv4le.h
@@ -0,0 +1,49 @@
+/* Target definitions for GNU compiler for a little endian PowerPC
+ running System V.4
+ Copyright (C) 1995, 2000 Free Software Foundation, Inc.
+ Contributed by Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_POWERPC | MASK_NEW_MNEMONICS | MASK_LITTLE_ENDIAN)
+
+#undef CPP_ENDIAN_DEFAULT_SPEC
+#define CPP_ENDIAN_DEFAULT_SPEC "%(cpp_endian_little)"
+
+#undef CC1_ENDIAN_DEFAULT_SPEC
+#define CC1_ENDIAN_DEFAULT_SPEC "%(cc1_endian_little)"
+
+#undef LINK_TARGET_SPEC
+#define LINK_TARGET_SPEC "\
+%{mbig: --oformat elf32-powerpc } %{mbig-endian: --oformat elf32-powerpc } \
+%{!mlittle: %{!mlittle-endian: %{!mbig: %{!mbig-endian: \
+ %{mcall-linux: --oformat elf32-powerpc} \
+ }}}}"
+
+/* Define this macro as a C expression for the initializer of an
+ array of string to tell the driver program which options are
+ defaults for this target and thus do not need to be handled
+ specially when using `MULTILIB_OPTIONS'.
+
+ Do not define this macro if `MULTILIB_OPTIONS' is not defined in
+ the target makefile fragment or if none of the options listed in
+ `MULTILIB_OPTIONS' are set by default. *Note Target Fragment::. */
+
+#undef MULTILIB_DEFAULTS
+#define MULTILIB_DEFAULTS { "mlittle", "mcall-sysv" }
diff --git a/contrib/gcc/config/rs6000/t-aix43 b/contrib/gcc/config/rs6000/t-aix43
new file mode 100644
index 0000000..209a763
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-aix43
@@ -0,0 +1,74 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Build the libraries for pthread and all of the
+# different processor models
+
+MULTILIB_OPTIONS = pthread \
+ mcpu=common/mcpu=power/mcpu=powerpc/maix64
+
+MULTILIB_DIRNAMES = pthread \
+ common power powerpc ppc64
+
+MULTILIB_MATCHES = mcpu?power=mpower \
+ mcpu?power=mrios1 \
+ mcpu?power=mcpu?rios1 \
+ mcpu?power=mcpu?rsc \
+ mcpu?power=mcpu?rsc1 \
+ mcpu?power=mpower2 \
+ mcpu?power=mrios2 \
+ mcpu?power=mcpu?rios2 \
+ mcpu?powerpc=mcpu?rs64a \
+ mcpu?powerpc=mcpu?601 \
+ mcpu?powerpc=mcpu?602 \
+ mcpu?powerpc=mcpu?603 \
+ mcpu?powerpc=mcpu?603e \
+ mcpu?powerpc=mcpu?604 \
+ mcpu?powerpc=mcpu?620 \
+ mcpu?powerpc=mcpu?630 \
+ mcpu?powerpc=mcpu?750 \
+ mcpu?powerpc=mcpu?403 \
+ mcpu?powerpc=mpowerpc \
+ mcpu?powerpc=mpowerpc-gpopt \
+ mcpu?powerpc=mpowerpc-gfxopt
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Build a shared libgcc library.
+SHLIB_EXT = .a
+SHLIB_LINK = $(GCC_FOR_TARGET) $(LIBGCC2_CFLAGS) -shared -nodefaultlibs \
+ -Wl,-bE:@shlib_map_file@ -o @multilib_dir@/shr.o \
+ @multilib_flags@ @shlib_objs@ -lc \
+ `case @shlib_base_name@ in \
+ *pthread*) echo -L/usr/lib/threads -lpthreads -lc_r /usr/lib/libc.a ;; \
+ *) echo -lc ;; esac` ; \
+ rm -f tmp-@shlib_base_name@.a ; \
+ $(AR_CREATE_FOR_TARGET) tmp-@shlib_base_name@.a @multilib_dir@/shr.o ; \
+ mv tmp-@shlib_base_name@.a @shlib_base_name@.a ; \
+ rm -f @multilib_dir@/shr.o
+# $(slibdir) double quoted to protect it from expansion while building
+# libgcc.mk. We want this delayed until actual install time.
+SHLIB_INSTALL = $(INSTALL_DATA) @shlib_base_name@.a $$(slibdir)/
+SHLIB_LIBS = -lc `case @shlib_base_name@ in *pthread*) echo -lpthread ;; esac`
+SHLIB_MKMAP = $(srcdir)/mkmap-flat.awk
+SHLIB_MAPFILES = $(srcdir)/libgcc-std.ver
+SHLIB_NM_FLAGS = -Bpg
+
+# Either 32-bit and 64-bit objects in archives.
+AR_FLAGS_FOR_TARGET = -X32_64
+
+# Compile Ada files with minimal-toc. The primary focus is gnatlib, so
+# that the library does not use nearly the entire TOC of applications
+# until gnatlib is built as a shared library on AIX. Compiling the
+# compiler with -mminimal-toc does not cause any harm.
+T_ADAFLAGS = -mminimal-toc
diff --git a/contrib/gcc/config/rs6000/t-beos b/contrib/gcc/config/rs6000/t-beos
new file mode 100644
index 0000000..1d4fbf7
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-beos
@@ -0,0 +1,48 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float mcpu=common
+MULTILIB_DIRNAMES = soft-float common
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# This is probably the correct define, to override the Makefile
+# default, but using it causes more problems than it solves.
+#
+# Using it will cause fixincludes to try and fix some of the
+# mwcc header files, which it seems to do a poor job of. On
+# the other hand, not using it will only cause the gcc version
+# of limits.h to lack the header and trailer parts that are
+# tacked on to it when there is a limits.h in the system header
+# dir.
+#
+# For now it is probably better to change the rule for
+# LIMITS_H_TEST to explicitly look for the BeOS limits.h.
+# If the gcc limits.h is not set up to #include_next the
+# BeOS limits.h, then some things will go undefined, like
+# PATH_MAX.
+
+#SYSTEM_HEADER_DIR=/boot/develop/headers/posix
+# Test to see whether <limits.h> exists in the system header files.
+LIMITS_H_TEST = [ -f /boot/develop/headers/posix/limits.h ]
+
+# Aix 3.2.x needs milli.exp for -mcpu=common
+EXTRA_PARTS = milli.exp
+milli.exp: $(srcdir)/config/rs6000/milli.exp
+ rm -f milli.exp
+ cp $(srcdir)/config/rs6000/milli.exp ./milli.exp
+
+# Don't use collect.
+USE_COLLECT2 =
diff --git a/contrib/gcc/config/rs6000/t-darwin b/contrib/gcc/config/rs6000/t-darwin
new file mode 100644
index 0000000..c9c392e
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-darwin
@@ -0,0 +1,33 @@
+# Library code must include trampoline support
+LIB2FUNCS_EXTRA = $(srcdir)/config/rs6000/darwin-tramp.asm
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+darwin.o: $(srcdir)/config/darwin.c $(CONFIG_H) $(SYSTEM_H) $(RTL_BASE_H) \
+ $(REGS_H) hard-reg-set.h insn-config.h conditions.h output.h \
+ insn-attr.h flags.h $(TREE_H) $(EXPR_H) reload.h \
+ function.h $(GGC_H) $(TM_P_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+darwin-c.o: $(srcdir)/config/darwin-c.c $(CONFIG_H) $(SYSTEM_H) \
+ $(TREE_H) $(C_TREE_H) c-lex.h c-pragma.h toplev.h cpplib.h \
+ $(TM_P_H)
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = soft-float
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/rs6000/t-newas b/contrib/gcc/config/rs6000/t-newas
new file mode 100644
index 0000000..b8e715a
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-newas
@@ -0,0 +1,49 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Build the libraries for both hard and soft floating point and all of the
+# different processor models
+
+MULTILIB_OPTIONS = msoft-float \
+ mcpu=common/mcpu=power/mcpu=powerpc
+
+MULTILIB_DIRNAMES = soft-float \
+ common power powerpc
+
+MULTILIB_MATCHES = msoft-float=mcpu?403 \
+ mcpu?power=mpower \
+ mcpu?power=mrios1 \
+ mcpu?power=mcpu?rios1 \
+ mcpu?power=mcpu?rsc \
+ mcpu?power=mcpu?rsc1 \
+ mcpu?power=mpower2 \
+ mcpu?power=mrios2 \
+ mcpu?power=mcpu=rios2 \
+ mcpu?powerpc=mcpu?601 \
+ mcpu?powerpc=mcpu?602 \
+ mcpu?powerpc=mcpu?603 \
+ mcpu?powerpc=mcpu?603e \
+ mcpu?powerpc=mcpu?604 \
+ mcpu?powerpc=mcpu?620 \
+ mcpu?powerpc=mcpu?403 \
+ mcpu?powerpc=mpowerpc \
+ mcpu?powerpc=mpowerpc-gpopt \
+ mcpu?powerpc=mpowerpc-gfxopt
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Aix 3.2.x needs milli.exp for -mcpu=common
+EXTRA_PARTS = milli.exp
+milli.exp: $(srcdir)/config/rs6000/milli.exp
+ rm -f milli.exp
+ cp $(srcdir)/config/rs6000/milli.exp ./milli.exp
diff --git a/contrib/gcc/config/rs6000/t-ppccomm b/contrib/gcc/config/rs6000/t-ppccomm
new file mode 100644
index 0000000..e264586
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-ppccomm
@@ -0,0 +1,78 @@
+# Common support for PowerPC ELF targets (both EABI and SVR4).
+
+LIB2FUNCS_EXTRA = tramp.S
+
+# This one can't end up in shared libgcc
+LIB2FUNCS_STATIC_EXTRA = eabi.S
+
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+eabi.S: $(srcdir)/config/rs6000/eabi.asm
+ cat $(srcdir)/config/rs6000/eabi.asm > eabi.S
+
+tramp.S: $(srcdir)/config/rs6000/tramp.asm
+ cat $(srcdir)/config/rs6000/tramp.asm > tramp.S
+
+# Switch synonyms
+MULTILIB_MATCHES_FLOAT = msoft-float=mcpu?401 \
+ msoft-float=mcpu?403 \
+ msoft-float=mcpu?ec603e \
+ msoft-float=mcpu?801 \
+ msoft-float=mcpu?821 \
+ msoft-float=mcpu?823 \
+ msoft-float=mcpu?860
+MULTILIB_MATCHES_ENDIAN = mlittle=mlittle-endian mbig=mbig-endian
+MULTILIB_MATCHES_SYSV = mcall-sysv=mcall-sysv-eabi mcall-sysv=mcall-sysv-noeabi mcall-sysv=mcall-linux mcall-sysv=mcall-netbsd
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+EXTRA_MULTILIB_PARTS = crtbegin$(objext) crtend$(objext) \
+ crtbeginS$(objext) crtendS$(objext) \
+ ecrti$(objext) ecrtn$(objext) \
+ ncrti$(objext) ncrtn$(objext)
+
+# We build {e,n}crti.o and {e,n}crtn.o, which serve to add begin and
+# end labels to all of the special sections used when we link using gcc.
+
+# Assemble startup files.
+ecrti.S: $(srcdir)/config/rs6000/eabi-ci.asm
+ cat $(srcdir)/config/rs6000/eabi-ci.asm >ecrti.S
+
+ecrtn.S: $(srcdir)/config/rs6000/eabi-cn.asm
+ cat $(srcdir)/config/rs6000/eabi-cn.asm >ecrtn.S
+
+ncrti.S: $(srcdir)/config/rs6000/sol-ci.asm
+ cat $(srcdir)/config/rs6000/sol-ci.asm >ncrti.S
+
+ncrtn.S: $(srcdir)/config/rs6000/sol-cn.asm
+ cat $(srcdir)/config/rs6000/sol-cn.asm >ncrtn.S
+
+# Build multiple copies of ?crt{i,n}.o, one for each target switch.
+$(T)ecrti$(objext): ecrti.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrti.S -o $(T)ecrti$(objext)
+
+$(T)ecrtn$(objext): ecrtn.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ecrtn.S -o $(T)ecrtn$(objext)
+
+$(T)ncrti$(objext): ncrti.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrti.S -o $(T)ncrti$(objext)
+
+$(T)ncrtn$(objext): ncrtn.S
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) $(MULTILIB_CFLAGS) -c ncrtn.S -o $(T)ncrtn$(objext)
+
+# It is important that crtbegin.o, etc., aren't surprised by stuff in .sdata.
+CRTSTUFF_T_CFLAGS = -msdata=none
+# Make sure crt*.o are built with -fPIC even if configured with
+# --enable-shared --disable-multilib
+CRTSTUFF_T_CFLAGS_S = -fPIC -msdata=none
diff --git a/contrib/gcc/config/rs6000/t-ppcgas b/contrib/gcc/config/rs6000/t-ppcgas
new file mode 100644
index 0000000..9b82b9f
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-ppcgas
@@ -0,0 +1,19 @@
+# Multilibs for powerpc embedded ELF targets.
+
+MULTILIB_OPTIONS = msoft-float \
+ mlittle/mbig \
+ mcall-sysv/mcall-aix \
+ fleading-underscore
+
+MULTILIB_DIRNAMES = nof \
+ le be \
+ cs ca \
+ und
+
+MULTILIB_EXTRA_OPTS = mrelocatable-lib mno-eabi mstrict-align
+MULTILIB_EXCEPTIONS = *mcall-aix/*fleading-underscore* \
+ *mlittle/*mcall-aix*
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT} \
+ ${MULTILIB_MATCHES_ENDIAN} \
+ ${MULTILIB_MATCHES_SYSV}
diff --git a/contrib/gcc/config/rs6000/t-ppcos b/contrib/gcc/config/rs6000/t-ppcos
new file mode 100644
index 0000000..819863b
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-ppcos
@@ -0,0 +1,8 @@
+# Multilibs for a powerpc hosted ELF target (linux, SVR4)
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = nof
+MULTILIB_EXTRA_OPTS = fPIC mstrict-align
+MULTILIB_EXCEPTIONS =
+
+MULTILIB_MATCHES = ${MULTILIB_MATCHES_FLOAT}
diff --git a/contrib/gcc/config/rs6000/t-rs6000 b/contrib/gcc/config/rs6000/t-rs6000
new file mode 100644
index 0000000..f50ef17
--- /dev/null
+++ b/contrib/gcc/config/rs6000/t-rs6000
@@ -0,0 +1,19 @@
+# We want fine grained libraries, so use the new code to build the
+# floating point emulation libraries.
+FPBIT = fp-bit.c
+DPBIT = dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# Build the libraries for both hard and soft floating point
+
+MULTILIB_OPTIONS = msoft-float
+MULTILIB_DIRNAMES = soft-float
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/rs6000/tramp.asm b/contrib/gcc/config/rs6000/tramp.asm
new file mode 100644
index 0000000..c2a38d1
--- /dev/null
+++ b/contrib/gcc/config/rs6000/tramp.asm
@@ -0,0 +1,109 @@
+/* Special support for trampolines
+ *
+ * Copyright (C) 1996, 1997, 2000 Free Software Foundation, Inc.
+ * Written By Michael Meissner
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * In addition to the permissions in the GNU General Public License, the
+ * Free Software Foundation gives you unlimited permission to link the
+ * compiled version of this file with other programs, and to distribute
+ * those programs without any restriction coming from the use of this
+ * file. (The General Public License restrictions do apply in other
+ * respects; for example, they cover modification of the file, and
+ * distribution when not linked into another program.)
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * As a special exception, if you link this library with files
+ * compiled with GCC to produce an executable, this does not cause
+ * the resulting executable to be covered by the GNU General Public License.
+ * This exception does not however invalidate any other reasons why
+ * the executable file might be covered by the GNU General Public License.
+ */
+
+/* Set up trampolines. */
+
+ .file "tramp.asm"
+ .section ".text"
+ #include "ppc-asm.h"
+
+ .type trampoline_initial,@object
+ .align 2
+trampoline_initial:
+ mflr r0
+ bl 1f
+.Lfunc = .-trampoline_initial
+ .long 0 /* will be replaced with function address */
+.Lchain = .-trampoline_initial
+ .long 0 /* will be replaced with static chain */
+1: mflr r11
+ mtlr r0
+ lwz r0,0(r11) /* function address */
+ lwz r11,4(r11) /* static chain */
+ mtctr r0
+ bctr
+
+trampoline_size = .-trampoline_initial
+ .size trampoline_initial,trampoline_size
+
+
+/* R3 = stack address to store trampoline */
+/* R4 = length of trampoline area */
+/* R5 = function address */
+/* R6 = static chain */
+
+FUNC_START(__trampoline_setup)
+ mflr r0 /* save return address */
+ bl .LCF0 /* load up __trampoline_initial into r7 */
+.LCF0:
+ mflr r11
+ addi r7,r11,trampoline_initial-4-.LCF0 /* trampoline address -4 */
+
+ li r8,trampoline_size /* verify that the trampoline is big enough */
+ cmpw cr1,r8,r4
+ srwi r4,r4,2 /* # words to move */
+ addi r9,r3,-4 /* adjust pointer for lwzu */
+ mtctr r4
+ blt cr1,.Labort
+
+ mtlr r0
+
+ /* Copy the instructions to the stack */
+.Lmove:
+ lwzu r10,4(r7)
+ stwu r10,4(r9)
+ bdnz .Lmove
+
+ /* Store correct function and static chain */
+ stw r5,.Lfunc(r3)
+ stw r6,.Lchain(r3)
+
+ /* Now flush both caches */
+ mtctr r4
+.Lcache:
+ icbi 0,r3
+ dcbf 0,r3
+ addi r3,r3,4
+ bdnz .Lcache
+
+ /* Finally synchronize things & return */
+ sync
+ isync
+ blr
+
+.Labort:
+ bl JUMP_TARGET(abort)
+FUNC_END(__trampoline_setup)
+
diff --git a/contrib/gcc/config/rs6000/vxppc.h b/contrib/gcc/config/rs6000/vxppc.h
new file mode 100644
index 0000000..b13f6f5
--- /dev/null
+++ b/contrib/gcc/config/rs6000/vxppc.h
@@ -0,0 +1,56 @@
+/* Definitions of target machine for GNU compiler. Vxworks PowerPC version.
+ Copyright (C) 1996, 2000 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file just exists to give specs for the PowerPC running on VxWorks. */
+
+/* Reset defaults */
+#undef CPP_OS_DEFAULT_SPEC
+#define CPP_OS_DEFAULT_SPEC "%(cpp_os_vxworks)"
+
+#undef LIB_DEFAULT_SPEC
+#define LIB_DEFAULT_SPEC "%(lib_vxworks)"
+
+#undef STARTFILE_DEFAULT_SPEC
+#define STARTFILE_DEFAULT_SPEC "%(startfile_vxworks)"
+
+#undef ENDFILE_DEFAULT_SPEC
+#define ENDFILE_DEFAULT_SPEC "%(endfile_vxworks)"
+
+#undef LINK_START_DEFAULT_SPEC
+#define LINK_START_DEFAULT_SPEC "%(link_start_vxworks)"
+
+#undef LINK_OS_DEFAULT_SPEC
+#define LINK_OS_DEFAULT_SPEC "%(link_os_vxworks)"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-D__vxworks -D__vxworks__ -Asystem=vxworks -Asystem=embedded \
+-Acpu=powerpc -Amachine=powerpc"
+
+/* Don't define _LITTLE_ENDIAN or _BIG_ENDIAN */
+#undef CPP_ENDIAN_BIG_SPEC
+#define CPP_ENDIAN_BIG_SPEC "-D__BIG_ENDIAN__ -Amachine=bigendian"
+
+#undef CPP_ENDIAN_LITTLE_SPEC
+#define CPP_ENDIAN_LITTLE_SPEC "-D__LITTLE_ENDIAN__ -Amachine=littleendian"
+
+/* We use stabs-in-elf for debugging */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/rs6000/xcoff.h b/contrib/gcc/config/rs6000/xcoff.h
new file mode 100644
index 0000000..e60f3fe
--- /dev/null
+++ b/contrib/gcc/config/rs6000/xcoff.h
@@ -0,0 +1,520 @@
+/* Definitions of target machine for GNU compiler,
+ for some generic XCOFF file format
+ Copyright (C) 2001 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+#define TARGET_OBJECT_FORMAT OBJECT_XCOFF
+
+/* The AIX linker will discard static constructors in object files before
+ collect has a chance to see them, so scan the object files directly. */
+#define COLLECT_EXPORT_LIST
+
+/* The RS/6000 uses the XCOFF format. */
+#define XCOFF_DEBUGGING_INFO
+
+/* Define if the object format being used is COFF or a superset. */
+#define OBJECT_FORMAT_COFF
+
+/* Define the magic numbers that we recognize as COFF.
+
+ AIX 4.3 adds U803XTOCMAGIC (0757) for 64-bit objects and AIX V5 adds
+ U64_TOCMAGIC (0767), but collect2.c does not include files in the
+ correct order to conditionally define the symbolic name in this macro.
+
+ The AIX linker accepts import/export files as object files,
+ so accept "#!" (0x2321) magic number. */
+#define MY_ISCOFF(magic) \
+ ((magic) == U802WRMAGIC || (magic) == U802ROMAGIC \
+ || (magic) == U802TOCMAGIC || (magic) == 0757 || (magic) == 0767 \
+ || (magic) == 0x2321)
+
+/* We don't have GAS for the RS/6000 yet, so don't write out special
+ .stabs in cc1plus. */
+
+#define FASCIST_ASSEMBLER
+
+/* We define this to prevent the name mangler from putting dollar signs into
+ function names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* We define this to 0 so that gcc will never accept a dollar sign in a
+ variable name. This is needed because the AIX assembler will not accept
+ dollar signs. */
+
+#define DOLLARS_IN_IDENTIFIERS 0
+
+/* Define the extra sections we need. We define three: one is the read-only
+ data section which is used for constants. This is a csect whose name is
+ derived from the name of the input file. The second is for initialized
+ global variables. This is a csect whose name is that of the variable.
+ The third is the TOC. */
+
+#define EXTRA_SECTIONS \
+ read_only_data, private_data, read_only_private_data, toc, bss
+
+/* Define the routines to implement these extra sections.
+ BIGGEST_ALIGNMENT is 64, so align the sections that much. */
+
+#define EXTRA_SECTION_FUNCTIONS \
+ \
+void \
+read_only_data_section () \
+{ \
+ if (in_section != read_only_data) \
+ { \
+ fprintf (asm_out_file, "\t.csect %s[RO],3\n", \
+ xcoff_read_only_section_name); \
+ in_section = read_only_data; \
+ } \
+} \
+ \
+void \
+private_data_section () \
+{ \
+ if (in_section != private_data) \
+ { \
+ fprintf (asm_out_file, "\t.csect %s[RW],3\n", \
+ xcoff_private_data_section_name); \
+ in_section = private_data; \
+ } \
+} \
+ \
+void \
+read_only_private_data_section () \
+{ \
+ if (in_section != read_only_private_data) \
+ { \
+ fprintf (asm_out_file, "\t.csect %s[RO],3\n", \
+ xcoff_private_data_section_name); \
+ in_section = read_only_private_data; \
+ } \
+} \
+ \
+void \
+toc_section () \
+{ \
+ if (TARGET_MINIMAL_TOC) \
+ { \
+ /* toc_section is always called at least once from ASM_FILE_START, \
+ so this is guaranteed to always be defined once and only once \
+ in each file. */ \
+ if (! toc_initialized) \
+ { \
+ fputs ("\t.toc\nLCTOC..1:\n", asm_out_file); \
+ fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file); \
+ toc_initialized = 1; \
+ } \
+ \
+ if (in_section != toc) \
+ fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n", \
+ (TARGET_32BIT ? "" : ",3")); \
+ } \
+ else \
+ { \
+ if (in_section != toc) \
+ fputs ("\t.toc\n", asm_out_file); \
+ } \
+ in_section = toc; \
+}
+
+/* Define the name of our readonly data section. */
+
+#define READONLY_DATA_SECTION read_only_data_section
+
+/* Select the section for an initialized data object.
+
+ On the RS/6000, we have a special section for all variables except those
+ that are static. */
+
+#define SELECT_SECTION(EXP,RELOC,ALIGN) \
+{ \
+ if ((TREE_CODE (EXP) == STRING_CST \
+ && ! flag_writable_strings) \
+ || (TREE_CODE_CLASS (TREE_CODE (EXP)) == 'd' \
+ && TREE_READONLY (EXP) && ! TREE_THIS_VOLATILE (EXP) \
+ && DECL_INITIAL (EXP) \
+ && (DECL_INITIAL (EXP) == error_mark_node \
+ || TREE_CONSTANT (DECL_INITIAL (EXP))) \
+ && ! (RELOC))) \
+ { \
+ if (TREE_PUBLIC (EXP)) \
+ read_only_data_section (); \
+ else \
+ read_only_private_data_section (); \
+ } \
+ else \
+ { \
+ if (TREE_PUBLIC (EXP)) \
+ data_section (); \
+ else \
+ private_data_section (); \
+ } \
+}
+
+/* Return non-zero if this entry is to be written into the constant
+ pool in a special way. We do so if this is a SYMBOL_REF, LABEL_REF
+ or a CONST containing one of them. If -mfp-in-toc (the default),
+ we also do this for floating-point constants. We actually can only
+ do this if the FP formats of the target and host machines are the
+ same, but we can't check that since not every file that uses
+ GO_IF_LEGITIMATE_ADDRESS_P includes real.h. We also do this when
+ we can write the entry into the TOC and the entry is not larger
+ than a TOC entry. */
+
+#define ASM_OUTPUT_SPECIAL_POOL_ENTRY_P(X, MODE) \
+ (TARGET_TOC \
+ && (GET_CODE (X) == SYMBOL_REF \
+ || (GET_CODE (X) == CONST && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == SYMBOL_REF) \
+ || GET_CODE (X) == LABEL_REF \
+ || (GET_CODE (X) == CONST_INT \
+ && GET_MODE_BITSIZE (MODE) <= GET_MODE_BITSIZE (Pmode)) \
+ || (GET_CODE (X) == CONST_DOUBLE \
+ && (TARGET_POWERPC64 \
+ || TARGET_MINIMAL_TOC \
+ || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && ! TARGET_NO_FP_IN_TOC)))))
+
+/* Select section for constant in constant pool.
+
+ On RS/6000, all constants are in the private read-only data area.
+ However, if this is being placed in the TOC it must be output as a
+ toc entry. */
+
+#define SELECT_RTX_SECTION(MODE, X, ALIGN) \
+{ if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (X, MODE)) \
+ toc_section (); \
+ else \
+ read_only_private_data_section (); \
+}
+
+/* If we are referencing a function that is static or is known to be
+ in this file, make the SYMBOL_REF special. We can use this to indicate
+ that we can branch to this function without emitting a no-op after the
+ call. Do not set this flag if the function is weakly defined. */
+
+#define ENCODE_SECTION_INFO(DECL) \
+ if (TREE_CODE (DECL) == FUNCTION_DECL \
+ && !TREE_PUBLIC (DECL) \
+ && !DECL_WEAK (DECL)) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1;
+
+/* FP save and restore routines. */
+#define SAVE_FP_PREFIX "._savef"
+#define SAVE_FP_SUFFIX ""
+#define RESTORE_FP_PREFIX "._restf"
+#define RESTORE_FP_SUFFIX ""
+
+/* Function name to call to do profiling. */
+#undef RS6000_MCOUNT
+#define RS6000_MCOUNT ".__mcount"
+
+/* Function names to call to do floating point truncation. */
+
+#undef RS6000_ITRUNC
+#define RS6000_ITRUNC "__itrunc"
+#undef RS6000_UITRUNC
+#define RS6000_UITRUNC "__uitrunc"
+
+/* This outputs NAME to FILE up to the first null or '['. */
+
+#define RS6000_OUTPUT_BASENAME(FILE, NAME) \
+ { \
+ const char *_p; \
+ \
+ STRIP_NAME_ENCODING (_p, (NAME)); \
+ assemble_name ((FILE), _p); \
+ }
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { RS6000_OUTPUT_BASENAME (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.globl ", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); putc ('\n', FILE);} while (0)
+
+/* Remove any trailing [DS] or the like from the symbol name. */
+
+#define STRIP_NAME_ENCODING(VAR,NAME) \
+ do \
+ { \
+ const char *_name = (NAME); \
+ size_t _len; \
+ if (*_name == '*') \
+ _name++; \
+ _len = strlen (_name); \
+ if (_name[_len - 1] != ']') \
+ (VAR) = _name; \
+ else \
+ { \
+ char *_new_name = (char *) alloca (_len + 1); \
+ strcpy (_new_name, _name); \
+ _new_name[_len - 4] = '\0'; \
+ (VAR) = _new_name; \
+ } \
+ } \
+ while (0)
+
+/* Output at beginning of assembler file.
+
+ Initialize the section names for the RS/6000 at this point.
+
+ Specify filename, including full path, to assembler.
+
+ We want to go into the TOC section so at least one .toc will be emitted.
+ Also, in order to output proper .bs/.es pairs, we need at least one static
+ [RW] section emitted.
+
+ Finally, declare mcount when profiling to make the assembler happy. */
+
+#define ASM_FILE_START(FILE) \
+{ \
+ rs6000_gen_section_name (&xcoff_bss_section_name, \
+ main_input_filename, ".bss_"); \
+ rs6000_gen_section_name (&xcoff_private_data_section_name, \
+ main_input_filename, ".rw_"); \
+ rs6000_gen_section_name (&xcoff_read_only_section_name, \
+ main_input_filename, ".ro_"); \
+ \
+ fprintf (FILE, "\t.file\t\"%s\"\n", main_input_filename); \
+ if (TARGET_64BIT) \
+ fputs ("\t.machine\t\"ppc64\"\n", FILE); \
+ toc_section (); \
+ if (write_symbols != NO_DEBUG) \
+ private_data_section (); \
+ text_section (); \
+ if (profile_flag) \
+ fprintf (FILE, "\t.extern %s\n", RS6000_MCOUNT); \
+ rs6000_file_start (FILE, TARGET_CPU_DEFAULT); \
+}
+
+/* Output at end of assembler file.
+
+ On the RS/6000, referencing data should automatically pull in text. */
+
+#define ASM_FILE_END(FILE) \
+{ \
+ text_section (); \
+ fputs ("_section_.text:\n", FILE); \
+ data_section (); \
+ fputs (TARGET_32BIT \
+ ? "\t.long _section_.text\n" : "\t.llong _section_.text\n", FILE); \
+}
+
+/* This macro produces the initial definition of a function name.
+ On the RS/6000, we need to place an extra '.' in the function name and
+ output the function descriptor.
+
+ The csect for the function will have already been created by the
+ `text_section' call previously done. We do have to go back to that
+ csect, however.
+
+ We also record that the function exists in the current compilation
+ unit, reachable by short branch, by setting SYMBOL_REF_FLAG.
+
+ The third and fourth parameters to the .function pseudo-op (16 and 044)
+ are placeholders which no longer have any use. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+{ rtx sym_ref = XEXP (DECL_RTL (DECL), 0); \
+ if (!DECL_WEAK (DECL)) \
+ SYMBOL_REF_FLAG (sym_ref) = 1; \
+ if (TREE_PUBLIC (DECL)) \
+ { \
+ if (RS6000_WEAK && DECL_WEAK (decl)) \
+ { \
+ fputs ("\t.weak .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ putc ('\n', FILE); \
+ } \
+ else \
+ { \
+ fputs ("\t.globl .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ putc ('\n', FILE); \
+ } \
+ } \
+ else \
+ { \
+ fputs ("\t.lglobl .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ putc ('\n', FILE); \
+ } \
+ fputs ("\t.csect ", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ fputs (":\n", FILE); \
+ fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ fputs (", TOC[tc0], 0\n", FILE); \
+ in_section = no_section; \
+ function_section(DECL); \
+ putc ('.', FILE); \
+ RS6000_OUTPUT_BASENAME (FILE, NAME); \
+ fputs (":\n", FILE); \
+ if (write_symbols == XCOFF_DEBUG) \
+ xcoffout_declare_function (FILE, DECL, NAME); \
+}
+
+/* Output a reference to SYM on FILE. */
+
+#define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \
+ rs6000_output_symbol_ref (FILE, SYM)
+
+/* This says how to output an external. */
+
+#undef ASM_OUTPUT_EXTERNAL
+#define ASM_OUTPUT_EXTERNAL(FILE, DECL, NAME) \
+{ rtx _symref = XEXP (DECL_RTL (DECL), 0); \
+ if ((TREE_CODE (DECL) == VAR_DECL \
+ || TREE_CODE (DECL) == FUNCTION_DECL) \
+ && (NAME)[strlen (NAME) - 1] != ']') \
+ { \
+ char *_name = (char *) permalloc (strlen (XSTR (_symref, 0)) + 5); \
+ strcpy (_name, XSTR (_symref, 0)); \
+ strcat (_name, TREE_CODE (DECL) == FUNCTION_DECL ? "[DS]" : "[RW]"); \
+ XSTR (_symref, 0) = _name; \
+ } \
+}
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s..%u:\n", (PREFIX), (unsigned) (NUM))
+
+/* This is how to output an internal label prefix. rs6000.c uses this
+ when generating traceback tables. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL_PREFIX(FILE,PREFIX) \
+ fprintf (FILE, "%s..", PREFIX)
+
+/* This is how to output a label for a jump table. Arguments are the same as
+ for ASM_OUTPUT_INTERNAL_LABEL, except the insn for the jump table is
+ passed. */
+
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 2); ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); }
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*%s..%u", (PREFIX), (unsigned) (NUM))
+
+/* This is how to output an assembler line to define N characters starting
+ at P to FILE. */
+
+#define ASM_OUTPUT_ASCII(FILE, P, N) output_ascii ((FILE), (P), (N))
+
+/* This is how to advance the location counter by SIZE bytes. */
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.space %d\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGNMENT) \
+ do { fputs ("\t.comm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ if ( (SIZE) > 4) \
+ fprintf ((FILE), ",%d,3\n", (SIZE)); \
+ else \
+ fprintf ((FILE), ",%d\n", (SIZE)); \
+ } while (0)
+
+/* This says how to output an assembler line
+ to define a local common symbol.
+ Alignment cannot be specified, but we can try to maintain
+ alignment after preceding TOC section if it was aligned
+ for 64-bit mode. */
+
+#define ASM_OUTPUT_LOCAL(FILE, NAME, SIZE, ROUNDED) \
+ do { fputs ("\t.lcomm ", (FILE)); \
+ RS6000_OUTPUT_BASENAME ((FILE), (NAME)); \
+ fprintf ((FILE), ",%d,%s\n", (TARGET_32BIT ? (SIZE) : (ROUNDED)), \
+ xcoff_bss_section_name); \
+ } while (0)
+
+/* Output a weak symbol, if weak support present. */
+#ifdef HAVE_GAS_WEAK
+#define HANDLE_PRAGMA_WEAK 1
+
+#define ASM_WEAKEN_LABEL(FILE, NAME) \
+ do \
+ { \
+ fputs ("\t.weak ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputc ('\n', (FILE)); \
+ } \
+ while (0)
+#endif /* HAVE_GAS_WEAK */
+
+/* This is how we tell the assembler that two symbols have the same value. */
+#define SET_ASM_OP "\t.set "
+
+/* Used by rs6000_assemble_integer, among others. */
+#define DOUBLE_INT_ASM_OP "\t.llong\t"
+
+/* Output before instructions. */
+#define TEXT_SECTION_ASM_OP "\t.csect .text[PR]"
+
+/* Output before writable data.
+ Align entire section to BIGGEST_ALIGNMENT. */
+#define DATA_SECTION_ASM_OP "\t.csect .data[RW],3"
+
+/* Define unique section name -- functions only. */
+#define UNIQUE_SECTION(DECL,RELOC) \
+ do { \
+ int len; \
+ const char *name; \
+ char *string; \
+ \
+ if (TREE_CODE (DECL) == FUNCTION_DECL) { \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ len = strlen (name) + 5; \
+ string = alloca (len + 1); \
+ sprintf (string, ".%s[PR]", name); \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+ } \
+ } while (0)
+
+/* Switch into a generic section. */
+#define TARGET_ASM_NAMED_SECTION xcoff_asm_named_section
+
+/* Define the name of the section to use for the EH language specific
+ data areas (.gcc_except_table on most other systems). */
+#define TARGET_ASM_EXCEPTION_SECTION data_section
+
+/* Define to prevent DWARF2 unwind info in the data section rather
+ than in the .eh_frame section. We do this because the AIX linker
+ would otherwise garbage collect these sections. */
+#define EH_FRAME_IN_DATA_SECTION 1
OpenPOWER on IntegriCloud