author    obrien <obrien@FreeBSD.org>  1999-08-26 09:30:50 +0000
committer obrien <obrien@FreeBSD.org>  1999-08-26 09:30:50 +0000
commit    0bedf4fb30066e5e1d4342a1d3914dae7d37cba7 (patch)
tree      68d8110b41afd0ebbf39167b1a4918eea667a7c5 /contrib/gcc/config
parent    d4db5fb866b7ad5216abd5047774a3973b9901a9 (diff)
Virgin import of gcc from EGCS 1.1.2
Diffstat (limited to 'contrib/gcc/config')
-rw-r--r--  contrib/gcc/config/alpha/alpha.c | 3669
-rw-r--r--  contrib/gcc/config/alpha/alpha.h | 1125
-rw-r--r--  contrib/gcc/config/alpha/alpha.md | 3300
-rw-r--r--  contrib/gcc/config/alpha/crtbegin.asm | 111
-rw-r--r--  contrib/gcc/config/alpha/crtend.asm | 105
-rw-r--r--  contrib/gcc/config/alpha/elf.h | 190
-rw-r--r--  contrib/gcc/config/alpha/linux-ecoff.h | 36
-rw-r--r--  contrib/gcc/config/alpha/linux-elf.h | 47
-rw-r--r--  contrib/gcc/config/alpha/linux.h | 44
-rw-r--r--  contrib/gcc/config/alpha/netbsd-elf.h | 31
-rw-r--r--  contrib/gcc/config/alpha/netbsd.h | 38
-rw-r--r--  contrib/gcc/config/alpha/openbsd.h | 126
-rw-r--r--  contrib/gcc/config/alpha/osf.h | 127
-rw-r--r--  contrib/gcc/config/alpha/osf12.h | 10
-rw-r--r--  contrib/gcc/config/alpha/osf2or3.h | 30
-rw-r--r--  contrib/gcc/config/alpha/t-crtbe | 9
-rw-r--r--  contrib/gcc/config/alpha/t-vms | 6
-rw-r--r--  contrib/gcc/config/alpha/va_list.h | 16
-rw-r--r--  contrib/gcc/config/alpha/vms-tramp.asm | 22
-rw-r--r--  contrib/gcc/config/alpha/vms.h | 510
-rw-r--r--  contrib/gcc/config/alpha/vxworks.h | 51
-rw-r--r--  contrib/gcc/config/alpha/x-alpha | 1
-rw-r--r--  contrib/gcc/config/alpha/xm-alpha.h | 18
-rw-r--r--  contrib/gcc/config/alpha/xm-openbsd.h | 23
-rw-r--r--  contrib/gcc/config/alpha/xm-vms.h | 93
-rw-r--r--  contrib/gcc/config/aoutos.h | 47
-rw-r--r--  contrib/gcc/config/dbx.h | 30
-rw-r--r--  contrib/gcc/config/dbxcoff.h | 87
-rw-r--r--  contrib/gcc/config/float-i128.h | 96
-rw-r--r--  contrib/gcc/config/float-i32.h | 96
-rw-r--r--  contrib/gcc/config/float-i386.h | 104
-rw-r--r--  contrib/gcc/config/float-i64.h | 96
-rw-r--r--  contrib/gcc/config/float-m68k.h | 97
-rw-r--r--  contrib/gcc/config/float-sh.h | 130
-rw-r--r--  contrib/gcc/config/float-vax.h | 96
-rw-r--r--  contrib/gcc/config/fp-bit.c | 250
-rw-r--r--  contrib/gcc/config/gnu.h | 7
-rw-r--r--  contrib/gcc/config/i386/386bsd.h | 9
-rw-r--r--  contrib/gcc/config/i386/aix386ng.h | 15
-rw-r--r--  contrib/gcc/config/i386/att.h | 21
-rw-r--r--  contrib/gcc/config/i386/bsd.h | 8
-rw-r--r--  contrib/gcc/config/i386/bsd386.h | 19
-rw-r--r--  contrib/gcc/config/i386/crtdll.h | 42
-rw-r--r--  contrib/gcc/config/i386/dgux.c | 190
-rw-r--r--  contrib/gcc/config/i386/dgux.h | 265
-rw-r--r--  contrib/gcc/config/i386/freebsd-elf.h | 44
-rw-r--r--  contrib/gcc/config/i386/freebsd.h | 349
-rw-r--r--  contrib/gcc/config/i386/freebsd.h.fixed | 349
-rw-r--r--  contrib/gcc/config/i386/gas.h | 48
-rw-r--r--  contrib/gcc/config/i386/gmon-sol2.c | 409
-rw-r--r--  contrib/gcc/config/i386/gnu.h | 15
-rw-r--r--  contrib/gcc/config/i386/go32.h | 34
-rw-r--r--  contrib/gcc/config/i386/i386.c | 2818
-rw-r--r--  contrib/gcc/config/i386/i386.h | 1313
-rw-r--r--  contrib/gcc/config/i386/i386.md | 4049
-rw-r--r--  contrib/gcc/config/i386/isc.h | 8
-rw-r--r--  contrib/gcc/config/i386/linux-aout.h | 20
-rw-r--r--  contrib/gcc/config/i386/linux-oldld.h | 21
-rw-r--r--  contrib/gcc/config/i386/linux.h | 73
-rw-r--r--  contrib/gcc/config/i386/lynx.h | 8
-rw-r--r--  contrib/gcc/config/i386/mingw32.h | 95
-rw-r--r--  contrib/gcc/config/i386/moss.h | 34
-rw-r--r--  contrib/gcc/config/i386/netbsd.h | 23
-rw-r--r--  contrib/gcc/config/i386/next.h | 15
-rw-r--r--  contrib/gcc/config/i386/openbsd.h | 130
-rw-r--r--  contrib/gcc/config/i386/osf1-ci.asm | 65
-rw-r--r--  contrib/gcc/config/i386/osf1-cn.asm | 46
-rw-r--r--  contrib/gcc/config/i386/osf1elf.h | 260
-rw-r--r--  contrib/gcc/config/i386/osf1elfgdb.h | 7
-rw-r--r--  contrib/gcc/config/i386/osfelf.h | 4
-rw-r--r--  contrib/gcc/config/i386/osfrose.h | 37
-rw-r--r--  contrib/gcc/config/i386/ptx4-i.h | 247
-rw-r--r--  contrib/gcc/config/i386/rtems.h | 34
-rw-r--r--  contrib/gcc/config/i386/rtemself.h | 169
-rw-r--r--  contrib/gcc/config/i386/sco.h | 8
-rw-r--r--  contrib/gcc/config/i386/sco4.h | 6
-rw-r--r--  contrib/gcc/config/i386/sco4dbx.h | 6
-rw-r--r--  contrib/gcc/config/i386/sco5.h | 957
-rw-r--r--  contrib/gcc/config/i386/sco5gas.h | 24
-rw-r--r--  contrib/gcc/config/i386/scodbx.h | 8
-rw-r--r--  contrib/gcc/config/i386/seq-gas.h | 4
-rw-r--r--  contrib/gcc/config/i386/seq-sysv3.h | 14
-rw-r--r--  contrib/gcc/config/i386/sol2-c1.asm | 4
-rw-r--r--  contrib/gcc/config/i386/sol2-gc1.asm | 160
-rw-r--r--  contrib/gcc/config/i386/sol2.h | 46
-rw-r--r--  contrib/gcc/config/i386/sun386.h | 7
-rw-r--r--  contrib/gcc/config/i386/svr3.ifile | 5
-rw-r--r--  contrib/gcc/config/i386/svr3dbx.h | 22
-rw-r--r--  contrib/gcc/config/i386/svr3gas.h | 18
-rw-r--r--  contrib/gcc/config/i386/svr3z.ifile | 5
-rw-r--r--  contrib/gcc/config/i386/sysv3.h | 8
-rw-r--r--  contrib/gcc/config/i386/sysv4.h | 14
-rw-r--r--  contrib/gcc/config/i386/t-crtpic | 1
-rw-r--r--  contrib/gcc/config/i386/t-dgux | 4
-rw-r--r--  contrib/gcc/config/i386/t-mingw32 | 4
-rw-r--r--  contrib/gcc/config/i386/t-next | 3
-rw-r--r--  contrib/gcc/config/i386/t-osf | 2
-rw-r--r--  contrib/gcc/config/i386/t-osf1elf | 18
-rw-r--r--  contrib/gcc/config/i386/t-sco5 | 20
-rw-r--r--  contrib/gcc/config/i386/t-sco5gas | 20
-rw-r--r--  contrib/gcc/config/i386/t-sol2 | 22
-rw-r--r--  contrib/gcc/config/i386/t-winnt | 6
-rw-r--r--  contrib/gcc/config/i386/unix.h | 47
-rw-r--r--  contrib/gcc/config/i386/vxi386.h | 23
-rw-r--r--  contrib/gcc/config/i386/winnt.c | 526
-rw-r--r--  contrib/gcc/config/i386/x-dgux | 11
-rw-r--r--  contrib/gcc/config/i386/x-osf1elf | 8
-rw-r--r--  contrib/gcc/config/i386/x-osfrose | 10
-rw-r--r--  contrib/gcc/config/i386/x-sco5 | 10
-rw-r--r--  contrib/gcc/config/i386/xm-aix.h | 35
-rw-r--r--  contrib/gcc/config/i386/xm-bsd386.h | 3
-rw-r--r--  contrib/gcc/config/i386/xm-dgux.h | 12
-rw-r--r--  contrib/gcc/config/i386/xm-dos.h | 6
-rw-r--r--  contrib/gcc/config/i386/xm-isc.h | 2
-rw-r--r--  contrib/gcc/config/i386/xm-linux.h | 4
-rw-r--r--  contrib/gcc/config/i386/xm-mingw32.h | 42
-rw-r--r--  contrib/gcc/config/i386/xm-openbsd.h | 23
-rw-r--r--  contrib/gcc/config/i386/xm-os2.h | 28
-rw-r--r--  contrib/gcc/config/i386/xm-osf.h | 30
-rw-r--r--  contrib/gcc/config/i386/xm-osf1elf.h | 6
-rw-r--r--  contrib/gcc/config/i386/xm-sco.h | 9
-rw-r--r--  contrib/gcc/config/i386/xm-sco5.h | 7
-rw-r--r--  contrib/gcc/config/i386/xm-sun.h | 6
-rw-r--r--  contrib/gcc/config/i386/xm-sysv4.h | 11
-rw-r--r--  contrib/gcc/config/i386/xm-vsta.h | 24
-rw-r--r--  contrib/gcc/config/libgloss.h | 35
-rw-r--r--  contrib/gcc/config/linux-aout.h | 13
-rw-r--r--  contrib/gcc/config/linux.h | 67
-rw-r--r--  contrib/gcc/config/lynx.h | 5
-rw-r--r--  contrib/gcc/config/netbsd.h | 37
-rw-r--r--  contrib/gcc/config/nextstep.c | 48
-rw-r--r--  contrib/gcc/config/nextstep.h | 75
-rw-r--r--  contrib/gcc/config/openbsd.h | 302
-rw-r--r--  contrib/gcc/config/psos.h | 183
-rw-r--r--  contrib/gcc/config/ptx4.h | 859
-rw-r--r--  contrib/gcc/config/sparc/aout.h | 26
-rw-r--r--  contrib/gcc/config/sparc/bsd.h | 7
-rw-r--r--  contrib/gcc/config/sparc/elf.h | 42
-rw-r--r--  contrib/gcc/config/sparc/gmon-sol2.c | 429
-rw-r--r--  contrib/gcc/config/sparc/lb1spc.asm | 784
-rw-r--r--  contrib/gcc/config/sparc/lb1spl.asm | 246
-rw-r--r--  contrib/gcc/config/sparc/linux-aout.h | 130
-rw-r--r--  contrib/gcc/config/sparc/linux.h | 259
-rw-r--r--  contrib/gcc/config/sparc/linux64.h | 245
-rw-r--r--  contrib/gcc/config/sparc/lite.h | 38
-rw-r--r--  contrib/gcc/config/sparc/litecoff.h | 113
-rw-r--r--  contrib/gcc/config/sparc/lynx-ng.h | 41
-rw-r--r--  contrib/gcc/config/sparc/lynx.h | 53
-rw-r--r--  contrib/gcc/config/sparc/netbsd.h | 46
-rw-r--r--  contrib/gcc/config/sparc/openbsd.h | 68
-rw-r--r--  contrib/gcc/config/sparc/pbd.h | 184
-rw-r--r--  contrib/gcc/config/sparc/rtems.h | 35
-rw-r--r--  contrib/gcc/config/sparc/sol2-c1.asm | 86
-rw-r--r--  contrib/gcc/config/sparc/sol2-ci.asm | 60
-rw-r--r--  contrib/gcc/config/sparc/sol2-cn.asm | 54
-rw-r--r--  contrib/gcc/config/sparc/sol2-g1.asm | 88
-rw-r--r--  contrib/gcc/config/sparc/sol2-sld.h | 11
-rw-r--r--  contrib/gcc/config/sparc/sol2.h | 232
-rw-r--r--  contrib/gcc/config/sparc/sp64-aout.h | 38
-rw-r--r--  contrib/gcc/config/sparc/sp64-elf.h | 157
-rw-r--r--  contrib/gcc/config/sparc/sparc.c | 6461
-rw-r--r--  contrib/gcc/config/sparc/sparc.h | 3287
-rw-r--r--  contrib/gcc/config/sparc/sparc.md | 6684
-rw-r--r--  contrib/gcc/config/sparc/splet.h | 53
-rw-r--r--  contrib/gcc/config/sparc/sun4gas.h | 27
-rw-r--r--  contrib/gcc/config/sparc/sun4o3.h | 29
-rw-r--r--  contrib/gcc/config/sparc/sunos4.h | 49
-rw-r--r--  contrib/gcc/config/sparc/sysv4.h | 231
-rw-r--r--  contrib/gcc/config/sparc/t-elf | 39
-rw-r--r--  contrib/gcc/config/sparc/t-sol2 | 30
-rw-r--r--  contrib/gcc/config/sparc/t-sp64 | 2
-rw-r--r--  contrib/gcc/config/sparc/t-sparcbare | 26
-rw-r--r--  contrib/gcc/config/sparc/t-sparclite | 24
-rw-r--r--  contrib/gcc/config/sparc/t-splet | 23
-rw-r--r--  contrib/gcc/config/sparc/t-sunos40 | 7
-rw-r--r--  contrib/gcc/config/sparc/t-sunos41 | 16
-rw-r--r--  contrib/gcc/config/sparc/t-vxsparc | 17
-rw-r--r--  contrib/gcc/config/sparc/vxsim.h | 131
-rw-r--r--  contrib/gcc/config/sparc/vxsparc.h | 61
-rw-r--r--  contrib/gcc/config/sparc/x-sysv4 | 2
-rw-r--r--  contrib/gcc/config/sparc/xm-linux.h | 26
-rw-r--r--  contrib/gcc/config/sparc/xm-lynx.h | 39
-rw-r--r--  contrib/gcc/config/sparc/xm-openbsd.h | 23
-rw-r--r--  contrib/gcc/config/sparc/xm-pbd.h | 10
-rw-r--r--  contrib/gcc/config/sparc/xm-sol2.h | 4
-rw-r--r--  contrib/gcc/config/sparc/xm-sp64.h | 25
-rw-r--r--  contrib/gcc/config/sparc/xm-sparc.h | 49
-rw-r--r--  contrib/gcc/config/sparc/xm-sysv4.h | 48
-rw-r--r--  contrib/gcc/config/svr3.h | 36
-rw-r--r--  contrib/gcc/config/svr4.h | 187
-rw-r--r--  contrib/gcc/config/t-freebsd | 5
-rw-r--r--  contrib/gcc/config/t-gnu | 13
-rw-r--r--  contrib/gcc/config/t-libc-ok | 1
-rw-r--r--  contrib/gcc/config/t-linux-aout | 11
-rw-r--r--  contrib/gcc/config/t-netbsd | 9
-rw-r--r--  contrib/gcc/config/t-openbsd | 9
-rw-r--r--  contrib/gcc/config/t-openbsd-thread | 5
-rw-r--r--  contrib/gcc/config/t-rtems | 6
-rw-r--r--  contrib/gcc/config/t-svr4 | 3
-rw-r--r--  contrib/gcc/config/x-linux | 15
-rw-r--r--  contrib/gcc/config/x-linux-aout | 14
-rw-r--r--  contrib/gcc/config/xm-alloca.h | 4
-rw-r--r--  contrib/gcc/config/xm-freebsd.h | 9
-rw-r--r--  contrib/gcc/config/xm-gnu.h | 5
-rw-r--r--  contrib/gcc/config/xm-linux.h | 19
-rw-r--r--  contrib/gcc/config/xm-netbsd.h | 1
-rw-r--r--  contrib/gcc/config/xm-openbsd.h | 35
-rw-r--r--  contrib/gcc/config/xm-siglist.h | 6
-rw-r--r--  contrib/gcc/config/xm-std32.h | 34
-rw-r--r--  contrib/gcc/config/xm-svr3.h | 10
-rw-r--r--  contrib/gcc/config/xm-svr4.h | 12
211 files changed, 42831 insertions, 4603 deletions
diff --git a/contrib/gcc/config/alpha/alpha.c b/contrib/gcc/config/alpha/alpha.c
index b77dc14..0b72289 100644
--- a/contrib/gcc/config/alpha/alpha.c
+++ b/contrib/gcc/config/alpha/alpha.c
@@ -1,5 +1,5 @@
/* Subroutines used for code generation on the DEC Alpha.
- Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GNU CC.
@@ -20,8 +20,8 @@ the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include <stdio.h>
#include "config.h"
+#include "system.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -34,9 +34,44 @@ Boston, MA 02111-1307, USA. */
#include "flags.h"
#include "recog.h"
#include "reload.h"
+#include "tree.h"
#include "expr.h"
#include "obstack.h"
-#include "tree.h"
+#include "except.h"
+#include "function.h"
+#include "toplev.h"
+
+/* External data. */
+extern char *version_string;
+extern int rtx_equal_function_value_matters;
+
+/* Specify which cpu to schedule for. */
+
+enum processor_type alpha_cpu;
+static char* const alpha_cpu_name[] =
+{
+ "ev4", "ev5", "ev6"
+};
+
+/* Specify how accurate floating-point traps need to be. */
+
+enum alpha_trap_precision alpha_tp;
+
+/* Specify the floating-point rounding mode. */
+
+enum alpha_fp_rounding_mode alpha_fprm;
+
+/* Specify which things cause traps. */
+
+enum alpha_fp_trap_mode alpha_fptm;
+
+/* Strings decoded into the above options. */
+
+char *alpha_cpu_string; /* -mcpu= */
+char *alpha_tp_string; /* -mtrap-precision=[p|s|i] */
+char *alpha_fprm_string; /* -mfp-rounding-mode=[n|m|c|d] */
+char *alpha_fptm_string; /* -mfp-trap-mode=[n|u|su|sui] */
+char *alpha_mlat_string; /* -mmemory-latency= */
/* Save information from a "cmpxx" operation until the branch or scc is
emitted. */
@@ -44,26 +79,226 @@ Boston, MA 02111-1307, USA. */
rtx alpha_compare_op0, alpha_compare_op1;
int alpha_compare_fp_p;
-/* Save the name of the current function as used by the assembler. This
- is used by the epilogue. */
-
-char *alpha_function_name;
-
/* Non-zero if inside of a function, because the Alpha asm can't
handle .files inside of functions. */
static int inside_function = FALSE;
-/* Nonzero if the current function needs gp. */
+/* If non-null, this rtx holds the return address for the function. */
-int alpha_function_needs_gp;
+static rtx alpha_return_addr_rtx;
-extern char *version_string;
-extern int rtx_equal_function_value_matters;
+/* The number of cycles of latency we should assume on memory reads. */
+
+int alpha_memory_latency = 3;
+
+/* Whether the function needs the GP. */
+
+static int alpha_function_needs_gp;
/* Declarations of static functions. */
-static void alpha_set_memflags_1 PROTO((rtx, int, int, int));
-static void add_long_const PROTO((FILE *, HOST_WIDE_INT, int, int, int));
+static void alpha_set_memflags_1
+ PROTO((rtx, int, int, int));
+static rtx alpha_emit_set_const_1
+ PROTO((rtx, enum machine_mode, HOST_WIDE_INT, int));
+static void alpha_expand_unaligned_load_words
+ PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
+static void alpha_expand_unaligned_store_words
+ PROTO((rtx *out_regs, rtx smem, HOST_WIDE_INT words, HOST_WIDE_INT ofs));
+static void alpha_sa_mask
+ PROTO((unsigned long *imaskP, unsigned long *fmaskP));
+static int alpha_does_function_need_gp
+ PROTO((void));
+
+
+/* Get the number of args of a function in one of two ways. */
+#ifdef OPEN_VMS
+#define NUM_ARGS current_function_args_info.num_args
+#else
+#define NUM_ARGS current_function_args_info
+#endif
+
+#define REG_PV 27
+#define REG_RA 26
+
+/* Parse target option strings. */
+
+void
+override_options ()
+{
+ alpha_cpu
+ = TARGET_CPU_DEFAULT & MASK_CPU_EV6 ? PROCESSOR_EV6
+ : (TARGET_CPU_DEFAULT & MASK_CPU_EV5 ? PROCESSOR_EV5 : PROCESSOR_EV4);
+
+ if (alpha_cpu_string)
+ {
+ if (! strcmp (alpha_cpu_string, "ev4")
+ || ! strcmp (alpha_cpu_string, "21064"))
+ {
+ alpha_cpu = PROCESSOR_EV4;
+ target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
+ }
+ else if (! strcmp (alpha_cpu_string, "ev5")
+ || ! strcmp (alpha_cpu_string, "21164"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags &= ~ (MASK_BWX | MASK_CIX | MASK_MAX);
+ }
+ else if (! strcmp (alpha_cpu_string, "ev56")
+ || ! strcmp (alpha_cpu_string, "21164a"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags |= MASK_BWX;
+ target_flags &= ~ (MASK_CIX | MASK_MAX);
+ }
+ else if (! strcmp (alpha_cpu_string, "pca56")
+ || ! strcmp (alpha_cpu_string, "21164PC")
+ || ! strcmp (alpha_cpu_string, "21164pc"))
+ {
+ alpha_cpu = PROCESSOR_EV5;
+ target_flags |= MASK_BWX | MASK_MAX;
+ target_flags &= ~ MASK_CIX;
+ }
+ else if (! strcmp (alpha_cpu_string, "ev6")
+ || ! strcmp (alpha_cpu_string, "21264"))
+ {
+ alpha_cpu = PROCESSOR_EV6;
+ target_flags |= MASK_BWX | MASK_CIX | MASK_MAX;
+ }
+ else
+ error ("bad value `%s' for -mcpu switch", alpha_cpu_string);
+ }
+
+ alpha_tp = ALPHA_TP_PROG;
+ alpha_fprm = ALPHA_FPRM_NORM;
+ alpha_fptm = ALPHA_FPTM_N;
+
+ if (TARGET_IEEE)
+ {
+ alpha_tp = ALPHA_TP_INSN;
+ alpha_fptm = ALPHA_FPTM_SU;
+ }
+
+ if (TARGET_IEEE_WITH_INEXACT)
+ {
+ alpha_tp = ALPHA_TP_INSN;
+ alpha_fptm = ALPHA_FPTM_SUI;
+ }
+
+ if (alpha_tp_string)
+ {
+ if (! strcmp (alpha_tp_string, "p"))
+ alpha_tp = ALPHA_TP_PROG;
+ else if (! strcmp (alpha_tp_string, "f"))
+ alpha_tp = ALPHA_TP_FUNC;
+ else if (! strcmp (alpha_tp_string, "i"))
+ alpha_tp = ALPHA_TP_INSN;
+ else
+ error ("bad value `%s' for -mtrap-precision switch", alpha_tp_string);
+ }
+
+ if (alpha_fprm_string)
+ {
+ if (! strcmp (alpha_fprm_string, "n"))
+ alpha_fprm = ALPHA_FPRM_NORM;
+ else if (! strcmp (alpha_fprm_string, "m"))
+ alpha_fprm = ALPHA_FPRM_MINF;
+ else if (! strcmp (alpha_fprm_string, "c"))
+ alpha_fprm = ALPHA_FPRM_CHOP;
+ else if (! strcmp (alpha_fprm_string,"d"))
+ alpha_fprm = ALPHA_FPRM_DYN;
+ else
+ error ("bad value `%s' for -mfp-rounding-mode switch",
+ alpha_fprm_string);
+ }
+
+ if (alpha_fptm_string)
+ {
+ if (strcmp (alpha_fptm_string, "n") == 0)
+ alpha_fptm = ALPHA_FPTM_N;
+ else if (strcmp (alpha_fptm_string, "u") == 0)
+ alpha_fptm = ALPHA_FPTM_U;
+ else if (strcmp (alpha_fptm_string, "su") == 0)
+ alpha_fptm = ALPHA_FPTM_SU;
+ else if (strcmp (alpha_fptm_string, "sui") == 0)
+ alpha_fptm = ALPHA_FPTM_SUI;
+ else
+ error ("bad value `%s' for -mfp-trap-mode switch", alpha_fptm_string);
+ }
+
+ /* Do some sanity checks on the above option. */
+
+ if ((alpha_fptm == ALPHA_FPTM_SU || alpha_fptm == ALPHA_FPTM_SUI)
+ && alpha_tp != ALPHA_TP_INSN)
+ {
+ warning ("fp software completion requires -mtrap-precision=i");
+ alpha_tp = ALPHA_TP_INSN;
+ }
+
+ if (TARGET_FLOAT_VAX)
+ {
+ if (alpha_fprm == ALPHA_FPRM_MINF || alpha_fprm == ALPHA_FPRM_DYN)
+ {
+ warning ("rounding mode not supported for VAX floats");
+ alpha_fprm = ALPHA_FPRM_NORM;
+ }
+ if (alpha_fptm == ALPHA_FPTM_SUI)
+ {
+ warning ("trap mode not supported for VAX floats");
+ alpha_fptm = ALPHA_FPTM_SU;
+ }
+ }
+
+ {
+ char *end;
+ int lat;
+
+ if (!alpha_mlat_string)
+ alpha_mlat_string = "L1";
+
+ if (isdigit (alpha_mlat_string[0])
+ && (lat = strtol (alpha_mlat_string, &end, 10), *end == '\0'))
+ ;
+ else if ((alpha_mlat_string[0] == 'L' || alpha_mlat_string[0] == 'l')
+ && isdigit (alpha_mlat_string[1])
+ && alpha_mlat_string[2] == '\0')
+ {
+ static int const cache_latency[][4] =
+ {
+ { 3, 30, -1 }, /* ev4 -- Bcache is a guess */
+ { 2, 12, 38 }, /* ev5 -- Bcache from PC164 LMbench numbers */
+ { 3, 13, -1 }, /* ev6 -- Ho hum, doesn't exist yet */
+ };
+
+ lat = alpha_mlat_string[1] - '0';
+ if (lat < 0 || lat > 3 || cache_latency[alpha_cpu][lat-1] == -1)
+ {
+ warning ("L%d cache latency unknown for %s",
+ lat, alpha_cpu_name[alpha_cpu]);
+ lat = 3;
+ }
+ else
+ lat = cache_latency[alpha_cpu][lat-1];
+ }
+ else if (! strcmp (alpha_mlat_string, "main"))
+ {
+ /* Most current memories have about 370ns latency. This is
+ a reasonable guess for a fast cpu. */
+ lat = 150;
+ }
+ else
+ {
+ warning ("bad value `%s' for -mmemory-latency", alpha_mlat_string);
+ lat = 3;
+ }
+
+ alpha_memory_latency = lat;
+ }
+
+ /* Default the definition of "small data" to 8 bytes. */
+ if (!g_switch_set)
+ g_switch_value = 8;
+}
/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */
@@ -102,6 +337,7 @@ reg_or_6bit_operand (op, mode)
{
return ((GET_CODE (op) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op) < 64)
+ || GET_CODE (op) == CONSTANT_P_RTX
|| register_operand (op, mode));
}
@@ -115,6 +351,7 @@ reg_or_8bit_operand (op, mode)
{
return ((GET_CODE (op) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
+ || GET_CODE (op) == CONSTANT_P_RTX
|| register_operand (op, mode));
}
@@ -123,10 +360,11 @@ reg_or_8bit_operand (op, mode)
int
cint8_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
- return (GET_CODE (op) == CONST_INT
- && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100);
+ return ((GET_CODE (op) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (op) < 0x100)
+ || GET_CODE (op) == CONSTANT_P_RTX);
}
/* Return 1 if the operand is a valid second operand to an add insn. */
@@ -140,6 +378,8 @@ add_operand (op, mode)
return (CONST_OK_FOR_LETTER_P (INTVAL (op), 'K')
|| CONST_OK_FOR_LETTER_P (INTVAL (op), 'L')
|| CONST_OK_FOR_LETTER_P (INTVAL (op), 'O'));
+ else if (GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
return register_operand (op, mode);
}
@@ -155,6 +395,8 @@ sext_add_operand (op, mode)
if (GET_CODE (op) == CONST_INT)
return ((unsigned HOST_WIDE_INT) INTVAL (op) < 255
|| (unsigned HOST_WIDE_INT) (- INTVAL (op)) < 255);
+ else if (GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
return register_operand (op, mode);
}
@@ -164,7 +406,7 @@ sext_add_operand (op, mode)
int
const48_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) == 4 || INTVAL (op) == 8));
@@ -185,6 +427,8 @@ and_operand (op, mode)
return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
|| (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100
|| zap_mask (INTVAL (op)));
+ else if (GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
return register_operand (op, mode);
}
@@ -199,6 +443,8 @@ or_operand (op, mode)
if (GET_CODE (op) == CONST_INT)
return ((unsigned HOST_WIDE_INT) INTVAL (op) < 0x100
|| (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100);
+ else if (GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
return register_operand (op, mode);
}
@@ -209,10 +455,11 @@ or_operand (op, mode)
int
mode_width_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == CONST_INT
- && (INTVAL (op) == 8 || INTVAL (op) == 16 || INTVAL (op) == 32));
+ && (INTVAL (op) == 8 || INTVAL (op) == 16
+ || INTVAL (op) == 32 || INTVAL (op) == 64));
}
/* Return 1 if OP is a constant that is the width of an integral machine mode
@@ -221,18 +468,24 @@ mode_width_operand (op, mode)
int
mode_mask_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
#if HOST_BITS_PER_WIDE_INT == 32
if (GET_CODE (op) == CONST_DOUBLE)
- return CONST_DOUBLE_HIGH (op) == 0 && CONST_DOUBLE_LOW (op) == -1;
+ return (CONST_DOUBLE_LOW (op) == -1
+ && (CONST_DOUBLE_HIGH (op) == -1
+ || CONST_DOUBLE_HIGH (op) == 0));
+#else
+ if (GET_CODE (op) == CONST_DOUBLE)
+ return (CONST_DOUBLE_LOW (op) == -1 && CONST_DOUBLE_HIGH (op) == 0);
#endif
return (GET_CODE (op) == CONST_INT
&& (INTVAL (op) == 0xff
|| INTVAL (op) == 0xffff
-#if HOST_BITS_PER_WIDE_INT == 64
|| INTVAL (op) == 0xffffffff
+#if HOST_BITS_PER_WIDE_INT == 64
+ || INTVAL (op) == 0xffffffffffffffff
#endif
));
}
@@ -242,7 +495,7 @@ mode_mask_operand (op, mode)
int
mul8_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == CONST_INT
&& (unsigned HOST_WIDE_INT) INTVAL (op) < 64
@@ -270,6 +523,18 @@ reg_or_fp0_operand (op, mode)
return fp0_operand (op, mode) || register_operand (op, mode);
}
+/* Return 1 if OP is a hard floating-point register. */
+
+int
+hard_fp_register_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ return ((GET_CODE (op) == REG && REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS)
+ || (GET_CODE (op) == SUBREG
+ && hard_fp_register_operand (SUBREG_REG (op), mode)));
+}
+
/* Return 1 if OP is a register or a constant integer. */
@@ -278,7 +543,9 @@ reg_or_cint_operand (op, mode)
register rtx op;
enum machine_mode mode;
{
- return GET_CODE (op) == CONST_INT || register_operand (op, mode);
+ return (GET_CODE (op) == CONST_INT
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || register_operand (op, mode));
}
/* Return 1 if OP is something that can be reloaded into a register;
@@ -294,12 +561,15 @@ some_operand (op, mode)
switch (GET_CODE (op))
{
- case REG: case MEM: case CONST_DOUBLE:
- case CONST_INT: case LABEL_REF: case SYMBOL_REF: case CONST:
+ case REG: case MEM: case CONST_DOUBLE: case CONST_INT: case LABEL_REF:
+ case SYMBOL_REF: case CONST: case CONSTANT_P_RTX:
return 1;
case SUBREG:
return some_operand (SUBREG_REG (op), VOIDmode);
+
+ default:
+ break;
}
return 0;
@@ -323,7 +593,7 @@ input_operand (op, mode)
case LABEL_REF:
case SYMBOL_REF:
case CONST:
- /* This handles both the Windows/NT and OSF cases. */
+ /* This handles both the Windows/NT and OSF cases. */
return mode == ptr_mode || mode == DImode;
case REG:
@@ -334,13 +604,18 @@ input_operand (op, mode)
return 1;
/* ... fall through ... */
case MEM:
- return mode != HImode && mode != QImode && general_operand (op, mode);
+ return ((TARGET_BWX || (mode != HImode && mode != QImode))
+ && general_operand (op, mode));
case CONST_DOUBLE:
return GET_MODE_CLASS (mode) == MODE_FLOAT && op == CONST0_RTX (mode);
case CONST_INT:
+ case CONSTANT_P_RTX:
return mode == QImode || mode == HImode || add_operand (op, mode);
+
+ default:
+ break;
}
return 0;
@@ -352,7 +627,7 @@ input_operand (op, mode)
int
current_file_function_operand (op, mode)
rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == SYMBOL_REF
&& ! profile_flag && ! profile_block_flag
@@ -370,7 +645,9 @@ call_operand (op, mode)
if (mode != Pmode)
return 0;
- return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG);
+ return (GET_CODE (op) == SYMBOL_REF
+ || (GET_CODE (op) == REG
+ && (TARGET_OPEN_VMS || TARGET_WINDOWS_NT || REGNO (op) == 27)));
}
/* Return 1 if OP is a valid Alpha comparison operator. Here we know which
@@ -390,17 +667,37 @@ alpha_comparison_operator (op, mode)
|| (mode == DImode && (code == LEU || code == LTU)));
}
+/* Return 1 if OP is a valid Alpha swapped comparison operator. */
+
+int
+alpha_swapped_comparison_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (mode != GET_MODE (op) || GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ code = swap_condition (code);
+ return (code == EQ || code == LE || code == LT
+ || (mode == DImode && (code == LEU || code == LTU)));
+}
+
/* Return 1 if OP is a signed comparison operation. */
int
signed_comparison_operator (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
switch (GET_CODE (op))
{
case EQ: case NE: case LE: case LT: case GE: case GT:
return 1;
+
+ default:
+ break;
}
return 0;
@@ -411,12 +708,15 @@ signed_comparison_operator (op, mode)
int
divmod_operator (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
switch (GET_CODE (op))
{
case DIV: case MOD: case UDIV: case UMOD:
return 1;
+
+ default:
+ break;
}
return 0;
@@ -459,10 +759,7 @@ aligned_memory_operand (op, mode)
op = XEXP (op, 0);
return (GET_CODE (op) == REG
- && (REGNO (op) == STACK_POINTER_REGNUM
- || op == hard_frame_pointer_rtx
- || (REGNO (op) >= FIRST_VIRTUAL_REGISTER
- && REGNO (op) <= LAST_VIRTUAL_REGISTER)));
+ && REGNO_POINTER_ALIGN (REGNO (op)) >= 4);
}
/* Similar, but return 1 if OP is a MEM which is not alignable. */
@@ -496,10 +793,17 @@ unaligned_memory_operand (op, mode)
op = XEXP (op, 0);
return (GET_CODE (op) != REG
- || (REGNO (op) != STACK_POINTER_REGNUM
- && op != hard_frame_pointer_rtx
- && (REGNO (op) < FIRST_VIRTUAL_REGISTER
- || REGNO (op) > LAST_VIRTUAL_REGISTER)));
+ || REGNO_POINTER_ALIGN (REGNO (op)) < 4);
+}
+
+/* Return 1 if OP is either a register or an unaligned memory location. */
+
+int
+reg_or_unaligned_mem_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return register_operand (op, mode) || unaligned_memory_operand (op, mode);
}
/* Return 1 if OP is any memory location. During reload a pseudo matches. */
@@ -507,7 +811,7 @@ unaligned_memory_operand (op, mode)
int
any_memory_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return (GET_CODE (op) == MEM
|| (GET_CODE (op) == SUBREG && GET_CODE (SUBREG_REG (op)) == REG)
@@ -518,6 +822,17 @@ any_memory_operand (op, mode)
&& REGNO (SUBREG_REG (op)) >= FIRST_PSEUDO_REGISTER));
}
+/* Return 1 if this function can directly return via $26. */
+
+int
+direct_return ()
+{
+ return (! TARGET_OPEN_VMS && reload_completed && alpha_sa_size () == 0
+ && get_frame_size () == 0
+ && current_function_outgoing_args_size == 0
+ && current_function_pretend_args_size == 0);
+}
+
/* REF is an alignable memory location. Place an aligned SImode
reference into *PALIGNED_MEM and the number of bits to shift into
*PBITNUM. */
@@ -551,7 +866,7 @@ get_aligned_mem (ref, paligned_mem, pbitnum)
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
- *paligned_mem = gen_rtx (MEM, SImode,
+ *paligned_mem = gen_rtx_MEM (SImode,
plus_constant (base, offset & ~3));
MEM_IN_STRUCT_P (*paligned_mem) = MEM_IN_STRUCT_P (ref);
MEM_VOLATILE_P (*paligned_mem) = MEM_VOLATILE_P (ref);
@@ -560,11 +875,13 @@ get_aligned_mem (ref, paligned_mem, pbitnum)
*pbitnum = GEN_INT ((offset & 3) * 8);
}
-/* Similar, but just get the address. Handle the two reload cases. */
+/* Similar, but just get the address. Handle the two reload cases.
+ Add EXTRA_OFFSET to the address we return. */
rtx
-get_unaligned_address (ref)
+get_unaligned_address (ref, extra_offset)
rtx ref;
+ int extra_offset;
{
rtx base;
HOST_WIDE_INT offset = 0;
@@ -590,7 +907,7 @@ get_unaligned_address (ref)
if (GET_CODE (base) == PLUS)
offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0);
- return plus_constant (base, offset);
+ return plus_constant (base, offset + extra_offset);
}
/* Subfunction of the following function. Update the flags of any MEM
@@ -629,6 +946,9 @@ alpha_set_memflags_1 (x, in_struct_p, volatile_p, unchanging_p)
MEM_VOLATILE_P (x) = volatile_p;
RTX_UNCHANGING_P (x) = unchanging_p;
break;
+
+ default:
+ break;
}
}
@@ -669,6 +989,26 @@ alpha_emit_set_const (target, mode, c, n)
HOST_WIDE_INT c;
int n;
{
+ rtx pat;
+ int i;
+
+ /* Try 1 insn, then 2, then up to N. */
+ for (i = 1; i <= n; i++)
+ if ((pat = alpha_emit_set_const_1 (target, mode, c, i)) != 0)
+ return pat;
+
+ return 0;
+}
+
+/* Internal routine for the above to check for N or below insns. */
+
+static rtx
+alpha_emit_set_const_1 (target, mode, c, n)
+ rtx target;
+ enum machine_mode mode;
+ HOST_WIDE_INT c;
+ int n;
+{
HOST_WIDE_INT new = c;
int i, bits;
/* Use a pseudo if highly optimizing and still generating RTL. */
@@ -688,12 +1028,10 @@ alpha_emit_set_const (target, mode, c, n)
/* If this is a sign-extended 32-bit constant, we can do this in at most
three insns, so do it if we have enough insns left. We always have
- a sign-extended 32-bit constant when compiling on a narrow machine.
- Note that we cannot handle the constant 0x80000000. */
+ a sign-extended 32-bit constant when compiling on a narrow machine. */
- if ((HOST_BITS_PER_WIDE_INT != 64
- || c >> 31 == -1 || c >> 31 == 0)
- && c != 0x80000000U)
+ if (HOST_BITS_PER_WIDE_INT != 64
+ || c >> 31 == -1 || c >> 31 == 0)
{
HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
HOST_WIDE_INT tmp1 = c - low;
@@ -712,13 +1050,19 @@ alpha_emit_set_const (target, mode, c, n)
}
if (c == low || (low == 0 && extra == 0))
- return copy_to_suggested_reg (GEN_INT (c), target, mode);
- else if (n >= 2 + (extra != 0)
- /* We can't do this when SImode if HIGH required adjustment.
- This is because the code relies on an implicit overflow
- which is invisible to the RTL. We can thus get incorrect
- code if the two ldah instructions are combined. */
- && ! (mode == SImode && extra != 0))
+ {
+ /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode)
+ but that meant that we can't handle INT_MIN on 32-bit machines
+ (like NT/Alpha), because we recurse indefinitely through
+ emit_move_insn to gen_movdi. So instead, since we know exactly
+ what we want, create it explicitly. */
+
+ if (target == NULL)
+ target = gen_reg_rtx (mode);
+ emit_insn (gen_rtx_SET (VOIDmode, target, GEN_INT (c)));
+ return target;
+ }
+ else if (n >= 2 + (extra != 0))
{
temp = copy_to_suggested_reg (GEN_INT (low), subtarget, mode);
@@ -795,9 +1139,11 @@ alpha_emit_set_const (target, mode, c, n)
/* Now try high-order zero bits. Here we try the shifted-in bits as
all zero and all ones. Be careful to avoid shifting outside the
mode and to avoid shifting outside the host wide int size. */
+ /* On narrow hosts, don't shift a 1 into the high bit, since we'll
+ confuse the recursive call and set all of the high 32 bits. */
if ((bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8)
- - floor_log2 (c) - 1)) > 0)
+ - floor_log2 (c) - 1 - (HOST_BITS_PER_WIDE_INT < 64))) > 0)
for (; bits > 0; bits--)
if ((temp = alpha_emit_set_const (subtarget, mode,
c << bits, i)) != 0
@@ -829,6 +1175,1077 @@ alpha_emit_set_const (target, mode, c, n)
return 0;
}
+
+#if HOST_BITS_PER_WIDE_INT == 64
+/* Having failed to find a 3 insn sequence in alpha_emit_set_const,
+ fall back to a straight forward decomposition. We do this to avoid
+ exponential run times encountered when looking for longer sequences
+ with alpha_emit_set_const. */
+
+rtx
+alpha_emit_set_long_const (target, c)
+ rtx target;
+ HOST_WIDE_INT c;
+{
+ /* Use a pseudo if highly optimizing and still generating RTL. */
+ rtx subtarget
+ = (flag_expensive_optimizations && rtx_equal_function_value_matters
+ ? 0 : target);
+ HOST_WIDE_INT d1, d2, d3, d4;
+ rtx r1, r2;
+
+ /* Decompose the entire word */
+ d1 = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ c -= d1;
+ d2 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
+ c = (c - d2) >> 32;
+ d3 = ((c & 0xffff) ^ 0x8000) - 0x8000;
+ c -= d3;
+ d4 = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
+
+ if (c - d4 != 0)
+ abort();
+
+ /* Construct the high word */
+ if (d3 == 0)
+ r1 = copy_to_suggested_reg (GEN_INT (d4), subtarget, DImode);
+ else if (d4 == 0)
+ r1 = copy_to_suggested_reg (GEN_INT (d3), subtarget, DImode);
+ else
+ r1 = expand_binop (DImode, add_optab, GEN_INT (d3), GEN_INT (d4),
+ subtarget, 0, OPTAB_WIDEN);
+
+ /* Shift it into place */
+ r2 = expand_binop (DImode, ashl_optab, r1, GEN_INT (32),
+ subtarget, 0, OPTAB_WIDEN);
+
+ if (subtarget == 0 && d1 == d3 && d2 == d4)
+ r1 = expand_binop (DImode, add_optab, r1, r2, subtarget, 0, OPTAB_WIDEN);
+ else
+ {
+ r1 = r2;
+
+ /* Add in the low word */
+ if (d2 != 0)
+ r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d2),
+ subtarget, 0, OPTAB_WIDEN);
+ if (d1 != 0)
+ r1 = expand_binop (DImode, add_optab, r1, GEN_INT (d1),
+ subtarget, 0, OPTAB_WIDEN);
+ }
+
+ if (subtarget == 0)
+ r1 = copy_to_suggested_reg(r1, target, DImode);
+
+ return r1;
+}
+#endif /* HOST_BITS_PER_WIDE_INT == 64 */
+
+/* Generate the comparison for a conditional branch. */
+
+rtx
+alpha_emit_conditional_branch (code)
+ enum rtx_code code;
+{
+ enum rtx_code cmp_code, branch_code;
+ enum machine_mode cmp_mode, branch_mode = VOIDmode;
+ rtx op0 = alpha_compare_op0, op1 = alpha_compare_op1;
+ rtx tem;
+
+ /* The general case: fold the comparison code to the types of compares
+ that we have, choosing the branch as necessary. */
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ /* We have these compares: */
+ cmp_code = code, branch_code = NE;
+ break;
+
+ case NE:
+ /* This must be reversed. */
+ cmp_code = EQ, branch_code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* For FP, we swap them, for INT, we reverse them. */
+ if (alpha_compare_fp_p)
+ {
+ cmp_code = swap_condition (code);
+ branch_code = NE;
+ tem = op0, op0 = op1, op1 = tem;
+ }
+ else
+ {
+ cmp_code = reverse_condition (code);
+ branch_code = EQ;
+ }
+ break;
+
+ default:
+ abort ();
+ }
+
+ if (alpha_compare_fp_p)
+ {
+ cmp_mode = DFmode;
+ if (flag_fast_math)
+ {
+ /* When we are not as concerned about non-finite values, and we
+ are comparing against zero, we can branch directly. */
+ if (op1 == CONST0_RTX (DFmode))
+ cmp_code = NIL, branch_code = code;
+ else if (op0 == CONST0_RTX (DFmode))
+ {
+ /* Undo the swap we probably did just above. */
+ tem = op0, op0 = op1, op1 = tem;
+ branch_code = swap_condition (cmp_code);
+ cmp_code = NIL;
+ }
+ }
+ else
+ {
+ /* ??? We mark the branch mode to be CCmode to prevent the
+ compare and branch from being combined, since the compare
+ insn follows IEEE rules that the branch does not. */
+ branch_mode = CCmode;
+ }
+ }
+ else
+ {
+ cmp_mode = DImode;
+
+ /* The following optimizations are only for signed compares. */
+ if (code != LEU && code != LTU && code != GEU && code != GTU)
+ {
+ /* Whee. Compare and branch against 0 directly. */
+ if (op1 == const0_rtx)
+ cmp_code = NIL, branch_code = code;
+
+ /* We want to use cmpcc/bcc when we can, since there is a zero delay
+ bypass between logicals and br/cmov on EV5. But we don't want to
+ force valid immediate constants into registers needlessly. */
+ else if (GET_CODE (op1) == CONST_INT)
+ {
+ HOST_WIDE_INT v = INTVAL (op1), n = -v;
+
+ if (! CONST_OK_FOR_LETTER_P (v, 'I')
+ && (CONST_OK_FOR_LETTER_P (n, 'K')
+ || CONST_OK_FOR_LETTER_P (n, 'L')))
+ {
+ cmp_code = PLUS, branch_code = code;
+ op1 = GEN_INT (n);
+ }
+ }
+ }
+ }
+
+ /* Force op0 into a register. */
+ if (GET_CODE (op0) != REG)
+ op0 = force_reg (cmp_mode, op0);
+
+ /* Emit an initial compare instruction, if necessary. */
+ tem = op0;
+ if (cmp_code != NIL)
+ {
+ tem = gen_reg_rtx (cmp_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1));
+ }
+
+ /* Return the branch comparison. */
+ return gen_rtx_fmt_ee (branch_code, branch_mode, tem, CONST0_RTX (cmp_mode));
+}
+
+
+/* Rewrite a comparison against zero CMP of the form
+ (CODE (cc0) (const_int 0)) so it can be written validly in
+ a conditional move (if_then_else CMP ...).
+ If both of the operands that set cc0 are non-zero we must emit
+ an insn to perform the compare (it can't be done within
+ the conditional move). */
+rtx
+alpha_emit_conditional_move (cmp, mode)
+ rtx cmp;
+ enum machine_mode mode;
+{
+ enum rtx_code code = GET_CODE (cmp);
+ enum rtx_code cmov_code = NE;
+ rtx op0 = alpha_compare_op0;
+ rtx op1 = alpha_compare_op1;
+ enum machine_mode cmp_mode
+ = (GET_MODE (op0) == VOIDmode ? DImode : GET_MODE (op0));
+ enum machine_mode cmp_op_mode = alpha_compare_fp_p ? DFmode : DImode;
+ enum machine_mode cmov_mode = VOIDmode;
+ rtx tem;
+
+ if (alpha_compare_fp_p != FLOAT_MODE_P (mode))
+ return 0;
+
+ /* We may be able to use a conditional move directly.
+ This avoids emitting spurious compares. */
+ if (signed_comparison_operator (cmp, cmp_op_mode)
+ && (!alpha_compare_fp_p || flag_fast_math)
+ && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode)))
+ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
+
+ /* We can't put the comparison inside a conditional move;
+ emit a compare instruction and put that inside the
+ conditional move. Make sure we emit only comparisons we have;
+ swap or reverse as necessary. */
+
+ switch (code)
+ {
+ case EQ: case LE: case LT: case LEU: case LTU:
+ /* We have these compares: */
+ break;
+
+ case NE:
+ /* This must be reversed. */
+ code = reverse_condition (code);
+ cmov_code = EQ;
+ break;
+
+ case GE: case GT: case GEU: case GTU:
+ /* These must be swapped. Make sure the new first operand is in
+ a register. */
+ code = swap_condition (code);
+ tem = op0, op0 = op1, op1 = tem;
+ op0 = force_reg (cmp_mode, op0);
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* ??? We mark the branch mode to be CCmode to prevent the compare
+ and cmov from being combined, since the compare insn follows IEEE
+ rules that the cmov does not. */
+ if (alpha_compare_fp_p && !flag_fast_math)
+ cmov_mode = CCmode;
+
+ tem = gen_reg_rtx (cmp_op_mode);
+ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_op_mode, op0, op1));
+ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_op_mode));
+}
+
+/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting
+ unaligned data:
+
+ unsigned: signed:
+ word: ldq_u r1,X(r11) ldq_u r1,X(r11)
+ ldq_u r2,X+1(r11) ldq_u r2,X+1(r11)
+ lda r3,X(r11) lda r3,X+2(r11)
+ extwl r1,r3,r1 extql r1,r3,r1
+ extwh r2,r3,r2 extqh r2,r3,r2
+ or r1.r2.r1 or r1,r2,r1
+ sra r1,48,r1
+
+ long: ldq_u r1,X(r11) ldq_u r1,X(r11)
+ ldq_u r2,X+3(r11) ldq_u r2,X+3(r11)
+ lda r3,X(r11) lda r3,X(r11)
+ extll r1,r3,r1 extll r1,r3,r1
+ extlh r2,r3,r2 extlh r2,r3,r2
+ or r1.r2.r1 addl r1,r2,r1
+
+ quad: ldq_u r1,X(r11)
+ ldq_u r2,X+7(r11)
+ lda r3,X(r11)
+ extql r1,r3,r1
+ extqh r2,r3,r2
+ or r1.r2.r1
+*/
+
+void
+alpha_expand_unaligned_load (tgt, mem, size, ofs, sign)
+ rtx tgt, mem;
+ HOST_WIDE_INT size, ofs;
+ int sign;
+{
+ rtx meml, memh, addr, extl, exth;
+ enum machine_mode mode;
+
+ meml = gen_reg_rtx (DImode);
+ memh = gen_reg_rtx (DImode);
+ addr = gen_reg_rtx (DImode);
+ extl = gen_reg_rtx (DImode);
+ exth = gen_reg_rtx (DImode);
+
+ emit_move_insn (meml,
+ change_address (mem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP (mem, 0),
+ ofs),
+ GEN_INT (-8))));
+
+ emit_move_insn (memh,
+ change_address (mem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP (mem, 0),
+ ofs + size - 1),
+ GEN_INT (-8))));
+
+ if (sign && size == 2)
+ {
+ emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs+2));
+
+ emit_insn (gen_extxl (extl, meml, GEN_INT (64), addr));
+ emit_insn (gen_extqh (exth, memh, addr));
+
+ /* We must use tgt here for the target. Alpha-vms port fails if we use
+ addr for the target, because addr is marked as a pointer and combine
+ knows that pointers are always sign-extended 32 bit values. */
+ addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN);
+ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48),
+ addr, 1, OPTAB_WIDEN);
+ }
+ else
+ {
+ emit_move_insn (addr, plus_constant (XEXP (mem, 0), ofs));
+ emit_insn (gen_extxl (extl, meml, GEN_INT (size*8), addr));
+ switch (size)
+ {
+ case 2:
+ emit_insn (gen_extwh (exth, memh, addr));
+ mode = HImode;
+ break;
+
+ case 4:
+ emit_insn (gen_extlh (exth, memh, addr));
+ mode = SImode;
+ break;
+
+ case 8:
+ emit_insn (gen_extqh (exth, memh, addr));
+ mode = DImode;
+ break;
+ }
+
+ addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl),
+ gen_lowpart (mode, exth), gen_lowpart (mode, tgt),
+ sign, OPTAB_WIDEN);
+ }
+
+ if (addr != tgt)
+ emit_move_insn (tgt, gen_lowpart(GET_MODE (tgt), addr));
+}
+
+/* Similarly, use ins and msk instructions to perform unaligned stores. */
+
+void
+alpha_expand_unaligned_store (dst, src, size, ofs)
+ rtx dst, src;
+ HOST_WIDE_INT size, ofs;
+{
+ rtx dstl, dsth, addr, insl, insh, meml, memh;
+
+ dstl = gen_reg_rtx (DImode);
+ dsth = gen_reg_rtx (DImode);
+ insl = gen_reg_rtx (DImode);
+ insh = gen_reg_rtx (DImode);
+
+ meml = change_address (dst, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP (dst, 0), ofs),
+ GEN_INT (-8)));
+ memh = change_address (dst, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP (dst, 0),
+ ofs+size-1),
+ GEN_INT (-8)));
+
+ emit_move_insn (dsth, memh);
+ emit_move_insn (dstl, meml);
+ addr = copy_addr_to_reg (plus_constant (XEXP (dst, 0), ofs));
+
+ if (src != const0_rtx)
+ {
+ emit_insn (gen_insxh (insh, gen_lowpart (DImode, src),
+ GEN_INT (size*8), addr));
+
+ switch (size)
+ {
+ case 2:
+ emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr));
+ break;
+ case 4:
+ emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr));
+ break;
+ case 8:
+ emit_insn (gen_insql (insl, src, addr));
+ break;
+ }
+ }
+
+ emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size*8), addr));
+
+ switch (size)
+ {
+ case 2:
+ emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffff), addr));
+ break;
+ case 4:
+ emit_insn (gen_mskxl (dstl, dstl, GEN_INT (0xffffffff), addr));
+ break;
+ case 8:
+ {
+#if HOST_BITS_PER_WIDE_INT == 32
+ rtx msk = immed_double_const (0xffffffff, 0xffffffff, DImode);
+#else
+ rtx msk = immed_double_const (0xffffffffffffffff, 0, DImode);
+#endif
+ emit_insn (gen_mskxl (dstl, dstl, msk, addr));
+ }
+ break;
+ }
+
+ if (src != const0_rtx)
+ {
+ dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN);
+ dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN);
+ }
+
+ /* Must store high before low for degenerate case of aligned. */
+ emit_move_insn (memh, dsth);
+ emit_move_insn (meml, dstl);
+}
+
+/* The block move code tries to maximize speed by separating loads and
+ stores at the expense of register pressure: we load all of the data
+ before we store it back out. There are two secondary effects worth
+ mentioning, that this speeds copying to/from aligned and unaligned
+ buffers, and that it makes the code significantly easier to write. */
+
+#define MAX_MOVE_WORDS 8
+
+/* Load an integral number of consecutive unaligned quadwords. */
+
+static void
+alpha_expand_unaligned_load_words (out_regs, smem, words, ofs)
+ rtx *out_regs;
+ rtx smem;
+ HOST_WIDE_INT words, ofs;
+{
+ rtx const im8 = GEN_INT (-8);
+ rtx const i64 = GEN_INT (64);
+ rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS+1];
+ rtx sreg, areg;
+ HOST_WIDE_INT i;
+
+ /* Generate all the tmp registers we need. */
+ for (i = 0; i < words; ++i)
+ {
+ data_regs[i] = out_regs[i];
+ ext_tmps[i] = gen_reg_rtx (DImode);
+ }
+ data_regs[words] = gen_reg_rtx (DImode);
+
+ if (ofs != 0)
+ smem = change_address (smem, GET_MODE (smem),
+ plus_constant (XEXP (smem, 0), ofs));
+
+ /* Load up all of the source data. */
+ for (i = 0; i < words; ++i)
+ {
+ emit_move_insn (data_regs[i],
+ change_address (smem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP(smem,0),
+ 8*i),
+ im8)));
+ }
+ emit_move_insn (data_regs[words],
+ change_address (smem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP(smem,0),
+ 8*words - 1),
+ im8)));
+
+ /* Extract the half-word fragments. Unfortunately DEC decided to make
+ extxh with offset zero a noop instead of zeroing the register, so
+ we must take care of that edge condition ourselves with cmov. */
+
+ sreg = copy_addr_to_reg (XEXP (smem, 0));
+ areg = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL,
+ 1, OPTAB_WIDEN);
+ for (i = 0; i < words; ++i)
+ {
+ emit_insn (gen_extxl (data_regs[i], data_regs[i], i64, sreg));
+
+ emit_insn (gen_extqh (ext_tmps[i], data_regs[i+1], sreg));
+ emit_insn (gen_rtx_SET (VOIDmode, ext_tmps[i],
+ gen_rtx_IF_THEN_ELSE (DImode,
+ gen_rtx_EQ (DImode, areg,
+ const0_rtx),
+ const0_rtx, ext_tmps[i])));
+ }
+
+ /* Merge the half-words into whole words. */
+ for (i = 0; i < words; ++i)
+ {
+ out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i],
+ ext_tmps[i], data_regs[i], 1, OPTAB_WIDEN);
+ }
+}
+
+/* Store an integral number of consecutive unaligned quadwords. DATA_REGS
+ may be NULL to store zeros. */
+
+static void
+alpha_expand_unaligned_store_words (data_regs, dmem, words, ofs)
+ rtx *data_regs;
+ rtx dmem;
+ HOST_WIDE_INT words, ofs;
+{
+ rtx const im8 = GEN_INT (-8);
+ rtx const i64 = GEN_INT (64);
+#if HOST_BITS_PER_WIDE_INT == 32
+ rtx const im1 = immed_double_const (0xffffffff, 0xffffffff, DImode);
+#else
+ rtx const im1 = immed_double_const (0xffffffffffffffff, 0, DImode);
+#endif
+ rtx ins_tmps[MAX_MOVE_WORDS];
+ rtx st_tmp_1, st_tmp_2, dreg;
+ rtx st_addr_1, st_addr_2;
+ HOST_WIDE_INT i;
+
+ /* Generate all the tmp registers we need. */
+ if (data_regs != NULL)
+ for (i = 0; i < words; ++i)
+ ins_tmps[i] = gen_reg_rtx(DImode);
+ st_tmp_1 = gen_reg_rtx(DImode);
+ st_tmp_2 = gen_reg_rtx(DImode);
+
+ if (ofs != 0)
+ dmem = change_address (dmem, GET_MODE (dmem),
+ plus_constant (XEXP (dmem, 0), ofs));
+
+
+ st_addr_2 = change_address (dmem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant (XEXP(dmem,0),
+ words*8 - 1),
+ im8));
+ st_addr_1 = change_address (dmem, DImode,
+ gen_rtx_AND (DImode,
+ XEXP (dmem, 0),
+ im8));
+
+ /* Load up the destination end bits. */
+ emit_move_insn (st_tmp_2, st_addr_2);
+ emit_move_insn (st_tmp_1, st_addr_1);
+
+ /* Shift the input data into place. */
+ dreg = copy_addr_to_reg (XEXP (dmem, 0));
+ if (data_regs != NULL)
+ {
+ for (i = words-1; i >= 0; --i)
+ {
+ emit_insn (gen_insxh (ins_tmps[i], data_regs[i], i64, dreg));
+ emit_insn (gen_insql (data_regs[i], data_regs[i], dreg));
+ }
+ for (i = words-1; i > 0; --i)
+ {
+ ins_tmps[i-1] = expand_binop (DImode, ior_optab, data_regs[i],
+ ins_tmps[i-1], ins_tmps[i-1], 1,
+ OPTAB_WIDEN);
+ }
+ }
+
+ /* Split and merge the ends with the destination data. */
+ emit_insn (gen_mskxh (st_tmp_2, st_tmp_2, i64, dreg));
+ emit_insn (gen_mskxl (st_tmp_1, st_tmp_1, im1, dreg));
+
+ if (data_regs != NULL)
+ {
+ st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words-1],
+ st_tmp_2, 1, OPTAB_WIDEN);
+ st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0],
+ st_tmp_1, 1, OPTAB_WIDEN);
+ }
+
+ /* Store it all. */
+ emit_move_insn (st_addr_2, st_tmp_2);
+ for (i = words-1; i > 0; --i)
+ {
+ emit_move_insn (change_address (dmem, DImode,
+ gen_rtx_AND (DImode,
+ plus_constant(XEXP (dmem,0),
+ i*8),
+ im8)),
+ data_regs ? ins_tmps[i-1] : const0_rtx);
+ }
+ emit_move_insn (st_addr_1, st_tmp_1);
+}
+
+
+/* Expand string/block move operations.
+
+ operands[0] is the pointer to the destination.
+ operands[1] is the pointer to the source.
+ operands[2] is the number of bytes to move.
+ operands[3] is the alignment. */
+
+int
+alpha_expand_block_move (operands)
+ rtx operands[];
+{
+ rtx bytes_rtx = operands[2];
+ rtx align_rtx = operands[3];
+ HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT src_align = INTVAL (align_rtx);
+ HOST_WIDE_INT dst_align = src_align;
+ rtx orig_src = operands[1];
+ rtx orig_dst = operands[0];
+ rtx data_regs[2*MAX_MOVE_WORDS+16];
+ rtx tmp;
+ int i, words, ofs, nregs = 0;
+
+ if (bytes <= 0)
+ return 1;
+ if (bytes > MAX_MOVE_WORDS*8)
+ return 0;
+
+ /* Look for additional alignment information from recorded register info. */
+
+ tmp = XEXP (orig_src, 0);
+ if (GET_CODE (tmp) == REG)
+ {
+ if (REGNO_POINTER_ALIGN (REGNO (tmp)) > src_align)
+ src_align = REGNO_POINTER_ALIGN (REGNO (tmp));
+ }
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > src_align)
+ {
+ if (a >= 8 && c % 8 == 0)
+ src_align = 8;
+ else if (a >= 4 && c % 4 == 0)
+ src_align = 4;
+ else if (a >= 2 && c % 2 == 0)
+ src_align = 2;
+ }
+ }
+
+ tmp = XEXP (orig_dst, 0);
+ if (GET_CODE (tmp) == REG)
+ {
+ if (REGNO_POINTER_ALIGN (REGNO (tmp)) > dst_align)
+ dst_align = REGNO_POINTER_ALIGN (REGNO (tmp));
+ }
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > dst_align)
+ {
+ if (a >= 8 && c % 8 == 0)
+ dst_align = 8;
+ else if (a >= 4 && c % 4 == 0)
+ dst_align = 4;
+ else if (a >= 2 && c % 2 == 0)
+ dst_align = 2;
+ }
+ }
+
+ /*
+ * Load the entire block into registers.
+ */
+
+ if (GET_CODE (XEXP (orig_src, 0)) == ADDRESSOF)
+ {
+ enum machine_mode mode;
+ tmp = XEXP (XEXP (orig_src, 0), 0);
+
+ mode = mode_for_size (bytes, MODE_INT, 1);
+ if (mode != BLKmode
+ && GET_MODE_SIZE (GET_MODE (tmp)) <= bytes)
+ {
+ /* Whee! Optimize the load to use the existing register. */
+ data_regs[nregs++] = gen_lowpart (mode, tmp);
+ goto src_done;
+ }
+
+ /* ??? We could potentially be copying 3 bytes or whatnot from
+ a wider reg. Probably not worth worrying about. */
+ /* No appropriate mode; fall back on memory. */
+ orig_src = change_address (orig_src, GET_MODE (orig_src),
+ copy_addr_to_reg (XEXP (orig_src, 0)));
+ }
+
+ ofs = 0;
+ if (src_align >= 8 && bytes >= 8)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words; ++i)
+ data_regs[nregs+i] = gen_reg_rtx(DImode);
+
+ for (i = 0; i < words; ++i)
+ {
+ emit_move_insn (data_regs[nregs+i],
+ change_address(orig_src, DImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs + i*8)));
+ }
+
+ nregs += words;
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+ if (src_align >= 4 && bytes >= 4)
+ {
+ words = bytes / 4;
+
+ for (i = 0; i < words; ++i)
+ data_regs[nregs+i] = gen_reg_rtx(SImode);
+
+ for (i = 0; i < words; ++i)
+ {
+ emit_move_insn (data_regs[nregs+i],
+ change_address(orig_src, SImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs + i*4)));
+ }
+
+ nregs += words;
+ bytes -= words * 4;
+ ofs += words * 4;
+ }
+ if (bytes >= 16)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words+1; ++i)
+ data_regs[nregs+i] = gen_reg_rtx(DImode);
+
+ alpha_expand_unaligned_load_words(data_regs+nregs, orig_src, words, ofs);
+
+ nregs += words;
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+ if (!TARGET_BWX && bytes >= 8)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (DImode);
+ alpha_expand_unaligned_load (tmp, orig_src, 8, ofs, 0);
+ bytes -= 8;
+ ofs += 8;
+ }
+ if (!TARGET_BWX && bytes >= 4)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (SImode);
+ alpha_expand_unaligned_load (tmp, orig_src, 4, ofs, 0);
+ bytes -= 4;
+ ofs += 4;
+ }
+ if (bytes >= 2)
+ {
+ if (src_align >= 2)
+ {
+ do {
+ data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
+ emit_move_insn (tmp,
+ change_address (orig_src, HImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs)));
+ bytes -= 2;
+ ofs += 2;
+ } while (bytes >= 2);
+ }
+ else if (!TARGET_BWX)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (HImode);
+ alpha_expand_unaligned_load (tmp, orig_src, 2, ofs, 0);
+ bytes -= 2;
+ ofs += 2;
+ }
+ }
+ while (bytes > 0)
+ {
+ data_regs[nregs++] = tmp = gen_reg_rtx (QImode);
+ emit_move_insn (tmp,
+ change_address (orig_src, QImode,
+ plus_constant (XEXP (orig_src, 0),
+ ofs)));
+ bytes -= 1;
+ ofs += 1;
+ }
+ src_done:
+
+ if (nregs > sizeof(data_regs)/sizeof(*data_regs))
+ abort();
+
+ /*
+ * Now save it back out again.
+ */
+
+ i = 0, ofs = 0;
+
+ if (GET_CODE (XEXP (orig_dst, 0)) == ADDRESSOF)
+ {
+ enum machine_mode mode;
+ tmp = XEXP (XEXP (orig_dst, 0), 0);
+
+ mode = mode_for_size (bytes, MODE_INT, 1);
+ if (GET_MODE (tmp) == mode && nregs == 1)
+ {
+ emit_move_insn (tmp, data_regs[0]);
+ i = 1;
+ goto dst_done;
+ }
+
+ /* ??? If nregs > 1, consider reconstructing the word in regs. */
+ /* ??? Optimize mode < dst_mode with strict_low_part. */
+ /* No appropriate mode; fall back on memory. */
+ orig_dst = change_address (orig_dst, GET_MODE (orig_dst),
+ copy_addr_to_reg (XEXP (orig_dst, 0)));
+ }
+
+ /* Write out the data in whatever chunks reading the source allowed. */
+ if (dst_align >= 8)
+ {
+ while (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ emit_move_insn (change_address(orig_dst, DImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ data_regs[i]);
+ ofs += 8;
+ i++;
+ }
+ }
+ if (dst_align >= 4)
+ {
+ /* If the source has remaining DImode regs, write them out in
+ two pieces. */
+ while (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32),
+ NULL_RTX, 1, OPTAB_WIDEN);
+
+ emit_move_insn (change_address(orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ gen_lowpart (SImode, data_regs[i]));
+ emit_move_insn (change_address(orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs+4)),
+ gen_lowpart (SImode, tmp));
+ ofs += 8;
+ i++;
+ }
+
+ while (i < nregs && GET_MODE (data_regs[i]) == SImode)
+ {
+ emit_move_insn (change_address(orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ data_regs[i]);
+ ofs += 4;
+ i++;
+ }
+ }
+ if (i < nregs && GET_MODE (data_regs[i]) == DImode)
+ {
+ /* Write out a remaining block of words using unaligned methods. */
+
+ for (words = 1; i+words < nregs ; ++words)
+ if (GET_MODE (data_regs[i+words]) != DImode)
+ break;
+
+ if (words == 1)
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs);
+ else
+ alpha_expand_unaligned_store_words (data_regs+i, orig_dst, words, ofs);
+
+ i += words;
+ ofs += words * 8;
+ }
+
+ /* Due to the above, this won't be aligned. */
+ /* ??? If we have more than one of these, consider constructing full
+ words in registers and using alpha_expand_unaligned_store_words. */
+ while (i < nregs && GET_MODE (data_regs[i]) == SImode)
+ {
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs);
+ ofs += 4;
+ i++;
+ }
+
+ if (dst_align >= 2)
+ while (i < nregs && GET_MODE (data_regs[i]) == HImode)
+ {
+ emit_move_insn (change_address (orig_dst, HImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ data_regs[i]);
+ i++;
+ ofs += 2;
+ }
+ else
+ while (i < nregs && GET_MODE (data_regs[i]) == HImode)
+ {
+ alpha_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs);
+ i++;
+ ofs += 2;
+ }
+ while (i < nregs && GET_MODE (data_regs[i]) == QImode)
+ {
+ emit_move_insn (change_address (orig_dst, QImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ data_regs[i]);
+ i++;
+ ofs += 1;
+ }
+ dst_done:
+
+ if (i != nregs)
+ abort();
+
+ return 1;
+}
+
+int
+alpha_expand_block_clear (operands)
+ rtx operands[];
+{
+ rtx bytes_rtx = operands[1];
+ rtx align_rtx = operands[2];
+ HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
+ HOST_WIDE_INT align = INTVAL (align_rtx);
+ rtx orig_dst = operands[0];
+ rtx tmp;
+ HOST_WIDE_INT i, words, ofs = 0;
+
+ if (bytes <= 0)
+ return 1;
+ if (bytes > MAX_MOVE_WORDS*8)
+ return 0;
+
+ /* Look for stricter alignment. */
+
+ tmp = XEXP (orig_dst, 0);
+ if (GET_CODE (tmp) == REG)
+ {
+ if (REGNO_POINTER_ALIGN (REGNO (tmp)) > align)
+ align = REGNO_POINTER_ALIGN (REGNO (tmp));
+ }
+ else if (GET_CODE (tmp) == PLUS
+ && GET_CODE (XEXP (tmp, 0)) == REG
+ && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
+ {
+ HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1));
+ int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0)));
+
+ if (a > align)
+ {
+ if (a >= 8 && c % 8 == 0)
+ align = 8;
+ else if (a >= 4 && c % 4 == 0)
+ align = 4;
+ else if (a >= 2 && c % 2 == 0)
+ align = 2;
+ }
+ }
+
+ /* Handle a block of contiguous words first. */
+
+ if (align >= 8 && bytes >= 8)
+ {
+ words = bytes / 8;
+
+ for (i = 0; i < words; ++i)
+ {
+ emit_move_insn (change_address(orig_dst, DImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs + i*8)),
+ const0_rtx);
+ }
+
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+ if (align >= 4 && bytes >= 4)
+ {
+ words = bytes / 4;
+
+ for (i = 0; i < words; ++i)
+ {
+ emit_move_insn (change_address(orig_dst, SImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs + i*4)),
+ const0_rtx);
+ }
+
+ bytes -= words * 4;
+ ofs += words * 4;
+ }
+ if (bytes >= 16)
+ {
+ words = bytes / 8;
+
+ alpha_expand_unaligned_store_words (NULL, orig_dst, words, ofs);
+
+ bytes -= words * 8;
+ ofs += words * 8;
+ }
+
+ /* Next clean up any trailing pieces. We know from the contiguous
+ block clear above that there are no aligned SImode or DImode hunks left. */
+
+ if (!TARGET_BWX && bytes >= 8)
+ {
+ alpha_expand_unaligned_store (orig_dst, const0_rtx, 8, ofs);
+ bytes -= 8;
+ ofs += 8;
+ }
+ if (!TARGET_BWX && bytes >= 4)
+ {
+ alpha_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs);
+ bytes -= 4;
+ ofs += 4;
+ }
+ if (bytes >= 2)
+ {
+ if (align >= 2)
+ {
+ do {
+ emit_move_insn (change_address (orig_dst, HImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ const0_rtx);
+ bytes -= 2;
+ ofs += 2;
+ } while (bytes >= 2);
+ }
+ else if (!TARGET_BWX)
+ {
+ alpha_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs);
+ bytes -= 2;
+ ofs += 2;
+ }
+ }
+ while (bytes > 0)
+ {
+ emit_move_insn (change_address (orig_dst, QImode,
+ plus_constant (XEXP (orig_dst, 0),
+ ofs)),
+ const0_rtx);
+ bytes -= 1;
+ ofs += 1;
+ }
+
+ return 1;
+}
+
/* Adjust the cost of a scheduling dependency. Return the new cost of
a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
@@ -840,7 +2257,8 @@ alpha_adjust_cost (insn, link, dep_insn, cost)
rtx dep_insn;
int cost;
{
- rtx set;
+ rtx set, set_src;
+ enum attr_type insn_type, dep_insn_type;
/* If the dependence is an anti-dependence, there is no cost. For an
output dependence, there is sometimes a cost, but it doesn't seem
@@ -849,60 +2267,224 @@ alpha_adjust_cost (insn, link, dep_insn, cost)
if (REG_NOTE_KIND (link) != 0)
return 0;
- /* If INSN is a store insn and DEP_INSN is setting the data being stored,
- we can sometimes lower the cost. */
+ /* If we can't recognize the insns, we can't really do anything. */
+ if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+ return cost;
- if (recog_memoized (insn) >= 0 && get_attr_type (insn) == TYPE_ST
- && (set = single_set (dep_insn)) != 0
- && GET_CODE (PATTERN (insn)) == SET
- && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
- switch (get_attr_type (dep_insn))
- {
- case TYPE_LD:
- /* No savings here. */
- return cost;
-
- case TYPE_IMULL:
- case TYPE_IMULQ:
- /* In these cases, we save one cycle. */
- return cost - 2;
-
- default:
- /* In all other cases, we save two cycles. */
- return MAX (0, cost - 4);
- }
+ insn_type = get_attr_type (insn);
+ dep_insn_type = get_attr_type (dep_insn);
- /* Another case that needs adjustment is an arithmetic or logical
- operation. It's cost is usually one cycle, but we default it to
- two in the MD file. The only case that it is actually two is
- for the address in loads and stores. */
+ /* Bring in the user-defined memory latency. */
+ if (dep_insn_type == TYPE_ILD
+ || dep_insn_type == TYPE_FLD
+ || dep_insn_type == TYPE_LDSYM)
+ cost += alpha_memory_latency-1;
- if (recog_memoized (dep_insn) >= 0
- && get_attr_type (dep_insn) == TYPE_IADDLOG)
- switch (get_attr_type (insn))
- {
- case TYPE_LD:
- case TYPE_ST:
- return cost;
+ switch (alpha_cpu)
+ {
+ case PROCESSOR_EV4:
+ /* On EV4, if INSN is a store insn and DEP_INSN is setting the data
+ being stored, we can sometimes lower the cost. */
+
+ if ((insn_type == TYPE_IST || insn_type == TYPE_FST)
+ && (set = single_set (dep_insn)) != 0
+ && GET_CODE (PATTERN (insn)) == SET
+ && rtx_equal_p (SET_DEST (set), SET_SRC (PATTERN (insn))))
+ {
+ switch (dep_insn_type)
+ {
+ case TYPE_ILD:
+ case TYPE_FLD:
+ /* No savings here. */
+ return cost;
+
+ case TYPE_IMUL:
+ /* In these cases, we save one cycle. */
+ return cost - 1;
+
+ default:
+ /* In all other cases, we save two cycles. */
+ return MAX (0, cost - 2);
+ }
+ }
- default:
- return 2;
- }
+ /* Another case that needs adjustment is an arithmetic or logical
+ operation. Its cost is usually one cycle, but we default it to
+ two in the MD file. The only case where it is actually two is
+ for the address in loads, stores, and jumps. */
- /* The final case is when a compare feeds into an integer branch. The cost
- is only one cycle in that case. */
+ if (dep_insn_type == TYPE_IADD || dep_insn_type == TYPE_ILOG)
+ {
+ switch (insn_type)
+ {
+ case TYPE_ILD:
+ case TYPE_IST:
+ case TYPE_FLD:
+ case TYPE_FST:
+ case TYPE_JSR:
+ return cost;
+ default:
+ return 1;
+ }
+ }
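+
+ /* In other words: an addq or bis feeding the address of a load, store,
+ or jsr keeps the two-cycle latency from the MD file, while one feeding
+ any other insn effectively costs a single cycle. */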
- if (recog_memoized (dep_insn) >= 0
- && get_attr_type (dep_insn) == TYPE_ICMP
- && recog_memoized (insn) >= 0
- && get_attr_type (insn) == TYPE_IBR)
- return 2;
+ /* The final case is when a compare feeds into an integer branch;
+ the cost is only one cycle in that case. */
- /* Otherwise, return the default cost. */
+ if (dep_insn_type == TYPE_ICMP && insn_type == TYPE_IBR)
+ return 1;
+ break;
+
+ case PROCESSOR_EV5:
+ /* And the lord DEC saith: "A special bypass provides an effective
+ latency of 0 cycles for an ICMP or ILOG insn producing the test
+ operand of an IBR or ICMOV insn." */
+
+ if ((dep_insn_type == TYPE_ICMP || dep_insn_type == TYPE_ILOG)
+ && (set = single_set (dep_insn)) != 0)
+ {
+ /* A branch only has one input. This must be it. */
+ if (insn_type == TYPE_IBR)
+ return 0;
+ /* A conditional move has three inputs; make sure it is the test. */
+ if (insn_type == TYPE_ICMOV
+ && GET_CODE (set_src = PATTERN (insn)) == SET
+ && GET_CODE (set_src = SET_SRC (set_src)) == IF_THEN_ELSE
+ && rtx_equal_p (SET_DEST (set), XEXP (set_src, 0)))
+ return 0;
+ }
+
+ /* "The multiplier is unable to receive data from IEU bypass paths.
+ The instruction issues at the expected time, but its latency is
+ increased by the time it takes for the input data to become
+ available to the multiplier" -- which happens in pipeline stage
+ six, when results are committed to the register file. */
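+ /* For example, an ldq feeding a mulq: the load's result appears in
+ pipeline stage five but the multiplier reads in stage six, so the
+ code below adds one extra cycle on top of the usual load latency. */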
+ if (insn_type == TYPE_IMUL)
+ {
+ switch (dep_insn_type)
+ {
+ /* These insns produce their results in pipeline stage five. */
+ case TYPE_ILD:
+ case TYPE_ICMOV:
+ case TYPE_IMUL:
+ case TYPE_MVI:
+ return cost + 1;
+
+ /* Other integer insns produce results in pipeline stage four. */
+ default:
+ return cost + 2;
+ }
+ }
+ break;
+
+ case PROCESSOR_EV6:
+ /* There is additional latency to move the result of (most) FP
+ operations anywhere but the FP register file. */
+
+ if ((insn_type == TYPE_FST || insn_type == TYPE_FTOI)
+ && (dep_insn_type == TYPE_FADD ||
+ dep_insn_type == TYPE_FMUL ||
+ dep_insn_type == TYPE_FCMOV))
+ return cost + 2;
+
+ break;
+ }
+
+ /* Otherwise, return the default cost. */
return cost;
}
+/* Functions to save and restore alpha_return_addr_rtx. */
+
+struct machine_function
+{
+ rtx ra_rtx;
+};
+
+static void
+alpha_save_machine_status (p)
+ struct function *p;
+{
+ struct machine_function *machine =
+ (struct machine_function *) xmalloc (sizeof (struct machine_function));
+
+ p->machine = machine;
+ machine->ra_rtx = alpha_return_addr_rtx;
+}
+
+static void
+alpha_restore_machine_status (p)
+ struct function *p;
+{
+ struct machine_function *machine = p->machine;
+
+ alpha_return_addr_rtx = machine->ra_rtx;
+
+ free (machine);
+ p->machine = (struct machine_function *)0;
+}
+
+/* Do anything needed before RTL is emitted for each function. */
+
+void
+alpha_init_expanders ()
+{
+ alpha_return_addr_rtx = NULL_RTX;
+
+ /* Arrange to save and restore machine status around nested functions. */
+ save_machine_status = alpha_save_machine_status;
+ restore_machine_status = alpha_restore_machine_status;
+}
+
+/* Start the ball rolling with RETURN_ADDR_RTX. */
+
+rtx
+alpha_return_addr (count, frame)
+ int count;
+ rtx frame ATTRIBUTE_UNUSED;
+{
+ rtx init;
+
+ if (count != 0)
+ return const0_rtx;
+
+ if (alpha_return_addr_rtx)
+ return alpha_return_addr_rtx;
+
+ /* No rtx yet. Invent one, and initialize it from $26 in the prologue. */
+ alpha_return_addr_rtx = gen_reg_rtx (Pmode);
+ init = gen_rtx_SET (Pmode, alpha_return_addr_rtx,
+ gen_rtx_REG (Pmode, REG_RA));
+
+ /* Emit the insn to the prologue with the other argument copies. */
+ push_topmost_sequence ();
+ emit_insn_after (init, get_insns ());
+ pop_topmost_sequence ();
+
+ return alpha_return_addr_rtx;
+}
+
+static int
+alpha_ra_ever_killed ()
+{
+ rtx top;
+
+#ifdef ASM_OUTPUT_MI_THUNK
+ if (current_function_is_thunk)
+ return 0;
+#endif
+ if (!alpha_return_addr_rtx)
+ return regs_ever_live[REG_RA];
+
+ push_topmost_sequence ();
+ top = get_insns();
+ pop_topmost_sequence ();
+
+ return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL_RTX);
+}
+
+
/* Print an operand. Recognize special options, documented below. */
void
@@ -915,6 +2497,115 @@ print_operand (file, x, code)
switch (code)
{
+ case '&':
+ /* Generates fp-rounding mode suffix: nothing for normal, 'c' for
+ chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
+ mode. alpha_fprm controls which suffix is generated. */
+ switch (alpha_fprm)
+ {
+ case ALPHA_FPRM_NORM:
+ break;
+ case ALPHA_FPRM_MINF:
+ fputc ('m', file);
+ break;
+ case ALPHA_FPRM_CHOP:
+ fputc ('c', file);
+ break;
+ case ALPHA_FPRM_DYN:
+ fputc ('d', file);
+ break;
+ }
+ break;
+
+ case '\'':
+ /* Generates trap-mode suffix for instructions that accept the su
+ suffix only (cmpt et al). */
+ if (alpha_tp == ALPHA_TP_INSN)
+ fputs ("su", file);
+ break;
+
+ case '`':
+ /* Generates trap-mode suffix for instructions that accept the
+ v and sv suffix. The only instruction that needs this is cvtql. */
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ break;
+ case ALPHA_FPTM_U:
+ fputs ("v", file);
+ break;
+ case ALPHA_FPTM_SU:
+ case ALPHA_FPTM_SUI:
+ fputs ("sv", file);
+ break;
+ }
+ break;
+
+ case '(':
+ /* Generates trap-mode suffix for instructions that accept the
+ v, sv, and svi suffix. The only instruction that needs this
+ is cvttq. */
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ break;
+ case ALPHA_FPTM_U:
+ fputs ("v", file);
+ break;
+ case ALPHA_FPTM_SU:
+ fputs ("sv", file);
+ break;
+ case ALPHA_FPTM_SUI:
+ fputs ("svi", file);
+ break;
+ }
+ break;
+
+ case ')':
+ /* Generates trap-mode suffix for instructions that accept the u, su,
+ and sui suffix. This is the bulk of the IEEE floating point
+ instructions (addt et al). */
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ break;
+ case ALPHA_FPTM_U:
+ fputc ('u', file);
+ break;
+ case ALPHA_FPTM_SU:
+ fputs ("su", file);
+ break;
+ case ALPHA_FPTM_SUI:
+ fputs ("sui", file);
+ break;
+ }
+ break;
+
+ case '+':
+ /* Generates trap-mode suffix for instructions that accept the sui
+ suffix (cvtqt and cvtqs). */
+ switch (alpha_fptm)
+ {
+ case ALPHA_FPTM_N:
+ case ALPHA_FPTM_U:
+ case ALPHA_FPTM_SU: /* cvtqt/cvtqs can't cause underflow */
+ break;
+ case ALPHA_FPTM_SUI:
+ fputs ("sui", file);
+ break;
+ }
+ break;
+
+ case ',':
+ /* Generates single precision instruction suffix. */
+ fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'f' : 's'));
+ break;
+
+ case '-':
+ /* Generates double precision instruction suffix. */
+ fprintf (file, "%c", (TARGET_FLOAT_VAX ? 'g' : 't'));
+ break;
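+
+ /* As a rough example of how the codes above combine: an IEEE double
+ add compiled with -mfp-trap-mode=su and -mfp-rounding-mode=c would
+ come out as something like addt/suc, with '-' supplying the 't',
+ ')' the "su", and '&' the 'c'. */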
+
case 'r':
/* If this operand is the constant zero, write it as "$31". */
if (GET_CODE (x) == REG)
@@ -942,7 +2633,7 @@ print_operand (file, x, code)
if (GET_CODE (x) != CONST_INT)
output_operand_lossage ("invalid %%N value");
- fprintf (file, "%ld", ~ INTVAL (x));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
break;
case 'P':
@@ -950,7 +2641,7 @@ print_operand (file, x, code)
if (GET_CODE (x) != CONST_INT)
output_operand_lossage ("invalid %%P value");
- fprintf (file, "%ld", (HOST_WIDE_INT) 1 << INTVAL (x));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT) 1 << INTVAL (x));
break;
case 'h':
@@ -958,7 +2649,7 @@ print_operand (file, x, code)
if (GET_CODE (x) != CONST_INT)
output_operand_lossage ("invalid %%h value");
- fprintf (file, "%ld", INTVAL (x) >> 16);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16);
break;
case 'L':
@@ -966,7 +2657,8 @@ print_operand (file, x, code)
if (GET_CODE (x) != CONST_INT)
output_operand_lossage ("invalid %%L value");
- fprintf (file, "%ld", (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000));
break;
case 'm':
@@ -988,7 +2680,7 @@ print_operand (file, x, code)
if (value & 0xff)
mask |= (1 << (i + sizeof (int)));
- fprintf (file, "%ld", mask & 0xff);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask & 0xff);
}
else if (GET_CODE (x) == CONST_INT)
@@ -999,20 +2691,24 @@ print_operand (file, x, code)
if (value & 0xff)
mask |= (1 << i);
- fprintf (file, "%ld", mask);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask);
}
else
output_operand_lossage ("invalid %%m value");
break;
case 'M':
- /* 'b', 'w', or 'l' as the value of the constant. */
+ /* 'b', 'w', 'l', or 'q' as the value of the constant. */
if (GET_CODE (x) != CONST_INT
- || (INTVAL (x) != 8 && INTVAL (x) != 16 && INTVAL (x) != 32))
+ || (INTVAL (x) != 8 && INTVAL (x) != 16
+ && INTVAL (x) != 32 && INTVAL (x) != 64))
output_operand_lossage ("invalid %%M value");
fprintf (file, "%s",
- INTVAL (x) == 8 ? "b" : INTVAL (x) == 16 ? "w" : "l");
+ (INTVAL (x) == 8 ? "b"
+ : INTVAL (x) == 16 ? "w"
+ : INTVAL (x) == 32 ? "l"
+ : "q"));
break;
case 'U':
@@ -1021,14 +2717,24 @@ print_operand (file, x, code)
fprintf (file, "b");
else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffff)
fprintf (file, "w");
+ else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
+ fprintf (file, "l");
#if HOST_BITS_PER_WIDE_INT == 32
else if (GET_CODE (x) == CONST_DOUBLE
&& CONST_DOUBLE_HIGH (x) == 0
&& CONST_DOUBLE_LOW (x) == -1)
fprintf (file, "l");
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (x) == -1
+ && CONST_DOUBLE_LOW (x) == -1)
+ fprintf (file, "q");
#else
- else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffff)
- fprintf (file, "l");
+ else if (GET_CODE (x) == CONST_INT && INTVAL (x) == 0xffffffffffffffff)
+ fprintf (file, "q");
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (x) == 0
+ && CONST_DOUBLE_LOW (x) == -1)
+ fprintf (file, "q");
#endif
else
output_operand_lossage ("invalid %%U value");
@@ -1041,7 +2747,7 @@ print_operand (file, x, code)
&& (INTVAL (x) & 7) != 8)
output_operand_lossage ("invalid %%s value");
- fprintf (file, "%ld", INTVAL (x) / 8);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8);
break;
case 'S':
@@ -1052,46 +2758,31 @@ print_operand (file, x, code)
&& (INTVAL (x) & 7) != 8)
output_operand_lossage ("invalid %%s value");
- fprintf (file, "%ld", (64 - INTVAL (x)) / 8);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, (64 - INTVAL (x)) / 8);
break;
- case 'C':
+ case 'C': case 'D': case 'c': case 'd':
/* Write out comparison name. */
- if (GET_RTX_CLASS (GET_CODE (x)) != '<')
- output_operand_lossage ("invalid %%C value");
-
- if (GET_CODE (x) == LEU)
- fprintf (file, "ule");
- else if (GET_CODE (x) == LTU)
- fprintf (file, "ult");
- else
- fprintf (file, "%s", GET_RTX_NAME (GET_CODE (x)));
- break;
-
- case 'D':
- /* Similar, but write reversed code. We can't get an unsigned code
- here. */
- if (GET_RTX_CLASS (GET_CODE (x)) != '<')
- output_operand_lossage ("invalid %%D value");
-
- fprintf (file, "%s", GET_RTX_NAME (reverse_condition (GET_CODE (x))));
- break;
-
- case 'c':
- /* Similar to `c', but swap. We can't get unsigned here either. */
- if (GET_RTX_CLASS (GET_CODE (x)) != '<')
- output_operand_lossage ("invalid %%D value");
-
- fprintf (file, "%s", GET_RTX_NAME (swap_condition (GET_CODE (x))));
- break;
-
- case 'd':
- /* Similar, but reverse and swap. We can't get unsigned here either. */
- if (GET_RTX_CLASS (GET_CODE (x)) != '<')
- output_operand_lossage ("invalid %%D value");
-
- fprintf (file, "%s",
- GET_RTX_NAME (swap_condition (reverse_condition ((GET_CODE (x))))));
+ {
+ enum rtx_code c = GET_CODE (x);
+
+ if (GET_RTX_CLASS (c) != '<')
+ output_operand_lossage ("invalid %%C value");
+
+ if (code == 'D')
+ c = reverse_condition (c);
+ else if (code == 'c')
+ c = swap_condition (c);
+ else if (code == 'd')
+ c = swap_condition (reverse_condition (c));
+
+ if (c == LEU)
+ fprintf (file, "ule");
+ else if (c == LTU)
+ fprintf (file, "ult");
+ else
+ fprintf (file, "%s", GET_RTX_NAME (c));
+ }
break;
case 'E':
@@ -1136,6 +2827,65 @@ print_operand (file, x, code)
}
}
+/* Emit RTL insns to initialize the variable parts of a trampoline at
+ TRAMP. FNADDR is an RTX for the address of the function's pure
+ code. CXT is an RTX for the static chain value for the function.
+
+ The three offset parameters are for the individual template's
+ layout. A JMPOFS < 0 indicates that the trampoline does not
+ contain instructions at all.
+
+ We assume here that a function will be called many more times than
+ its address is taken (e.g., it might be passed to qsort), so we
+ take the trouble to initialize the "hint" field in the JMP insn.
+ Note that the hint field is PC (new) + 4 * bits 13:0. */
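+
+/* For instance, for a jmp located at address P and a target function F,
+ the hint would be ((F - (P + 4)) >> 2) & 0x3fff, which is what the
+ (currently disabled) hint code below computes. */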
+
+void
+alpha_initialize_trampoline (tramp, fnaddr, cxt, fnofs, cxtofs, jmpofs)
+ rtx tramp, fnaddr, cxt;
+ int fnofs, cxtofs, jmpofs;
+{
+ rtx temp, temp1, addr;
+ /* ??? Something is wrong with VMS codegen in that we get aborts when
+ using ptr_mode. Hack around it for now. */
+ enum machine_mode mode = TARGET_OPEN_VMS ? Pmode : ptr_mode;
+
+ /* Store function address and CXT. */
+ addr = memory_address (mode, plus_constant (tramp, fnofs));
+ emit_move_insn (gen_rtx (MEM, mode, addr), fnaddr);
+ addr = memory_address (mode, plus_constant (tramp, cxtofs));
+ emit_move_insn (gen_rtx (MEM, mode, addr), cxt);
+
+ /* This has been disabled since the hint only has a 32k range, and in
+ no existing OS is the stack within 32k of the text segment. */
+ if (0 && jmpofs >= 0)
+ {
+ /* Compute hint value. */
+ temp = force_operand (plus_constant (tramp, jmpofs+4), NULL_RTX);
+ temp = expand_binop (DImode, sub_optab, fnaddr, temp, temp, 1,
+ OPTAB_WIDEN);
+ temp = expand_shift (RSHIFT_EXPR, Pmode, temp,
+ build_int_2 (2, 0), NULL_RTX, 1);
+ temp = expand_and (gen_lowpart (SImode, temp), GEN_INT (0x3fff), 0);
+
+ /* Merge in the hint. */
+ addr = memory_address (SImode, plus_constant (tramp, jmpofs));
+ temp1 = force_reg (SImode, gen_rtx (MEM, SImode, addr));
+ temp1 = expand_and (temp1, GEN_INT (0xffffc000), NULL_RTX);
+ temp1 = expand_binop (SImode, ior_optab, temp1, temp, temp1, 1,
+ OPTAB_WIDEN);
+ emit_move_insn (gen_rtx (MEM, SImode, addr), temp1);
+ }
+
+#ifdef TRANSFER_FROM_TRAMPOLINE
+ emit_library_call (gen_rtx (SYMBOL_REF, Pmode, "__enable_execute_stack"),
+ 0, VOIDmode, 1, addr, Pmode);
+#endif
+
+ if (jmpofs >= 0)
+ emit_insn (gen_imb ());
+}
+
/* Do what is necessary for `va_start'. The argument is ignored;
We look at the current function to determine if stdarg or varargs
is used and fill in an initial va_list. A pointer to this constructor
@@ -1143,9 +2893,9 @@ print_operand (file, x, code)
struct rtx_def *
alpha_builtin_saveregs (arglist)
- tree arglist;
+ tree arglist ATTRIBUTE_UNUSED;
{
- rtx block, addr, argsize;
+ rtx block, addr, dest, argsize;
tree fntype = TREE_TYPE (current_function_decl);
int stdarg = (TYPE_ARG_TYPES (fntype) != 0
&& (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
@@ -1153,11 +2903,11 @@ alpha_builtin_saveregs (arglist)
/* Compute the current position into the args, taking into account
both registers and memory. Both of these are already included in
- current_function_args_info. */
+ NUM_ARGS. */
- argsize = GEN_INT (current_function_args_info * UNITS_PER_WORD);
+ argsize = GEN_INT (NUM_ARGS * UNITS_PER_WORD);
- /* SETUP_INCOMING_VARARGS moves the starting address base up by 48,
+ /* For Unix, SETUP_INCOMING_VARARGS moves the starting address base up by 48,
storing fp arg registers in the first 48 bytes, and the integer arg
registers in the next 48 bytes. This is only done, however, if any
integer registers need to be stored.
@@ -1166,35 +2916,71 @@ alpha_builtin_saveregs (arglist)
order to account for the integer arg registers which are counted in
argsize above, but which are not actually stored on the stack. */
- addr = (current_function_args_info <= 5 + stdarg
- ? plus_constant (virtual_incoming_args_rtx, 6 * UNITS_PER_WORD)
- : plus_constant (virtual_incoming_args_rtx, - (6 * UNITS_PER_WORD)));
-
- addr = force_operand (addr, NULL_RTX);
+ if (TARGET_OPEN_VMS)
+ addr = plus_constant (virtual_incoming_args_rtx,
+ NUM_ARGS <= 5 + stdarg
+ ? UNITS_PER_WORD : - 6 * UNITS_PER_WORD);
+ else
+ addr = (NUM_ARGS <= 5 + stdarg
+ ? plus_constant (virtual_incoming_args_rtx,
+ 6 * UNITS_PER_WORD)
+ : plus_constant (virtual_incoming_args_rtx,
+ - (6 * UNITS_PER_WORD)));
- /* Allocate the va_list constructor */
- block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
- RTX_UNCHANGING_P (block) = 1;
- RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
+ /* For VMS, we include the argsize, while on Unix, it's handled as
+ a separate field. */
+ if (TARGET_OPEN_VMS)
+ addr = plus_constant (addr, INTVAL (argsize));
- /* Store the address of the first integer register in the __base member. */
+ addr = force_operand (addr, NULL_RTX);
#ifdef POINTERS_EXTEND_UNSIGNED
addr = convert_memory_address (ptr_mode, addr);
#endif
- emit_move_insn (change_address (block, ptr_mode, XEXP (block, 0)), addr);
-
- /* Store the argsize as the __va_offset member. */
- emit_move_insn (change_address (block, TYPE_MODE (integer_type_node),
- plus_constant (XEXP (block, 0),
- POINTER_SIZE/BITS_PER_UNIT)),
- argsize);
-
- /* Return the address of the va_list constructor, but don't put it in a
- register. Doing so would fail when not optimizing and produce worse
- code when optimizing. */
- return XEXP (block, 0);
+ if (TARGET_OPEN_VMS)
+ return addr;
+ else
+ {
+ /* Allocate the va_list constructor */
+ block = assign_stack_local (BLKmode, 2 * UNITS_PER_WORD, BITS_PER_WORD);
+ RTX_UNCHANGING_P (block) = 1;
+ RTX_UNCHANGING_P (XEXP (block, 0)) = 1;
+
+ /* Store the address of the first integer register in the __base
+ member. */
+
+ dest = change_address (block, ptr_mode, XEXP (block, 0));
+ emit_move_insn (dest, addr);
+
+ if (flag_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ dest, ptr_mode,
+ GEN_INT (GET_MODE_SIZE (ptr_mode)),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ /* Store the argsize as the __va_offset member. */
+ dest = change_address (block, TYPE_MODE (integer_type_node),
+ plus_constant (XEXP (block, 0),
+ POINTER_SIZE/BITS_PER_UNIT));
+ emit_move_insn (dest, argsize);
+
+ if (flag_check_memory_usage)
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ dest, ptr_mode,
+ GEN_INT (GET_MODE_SIZE
+ (TYPE_MODE (integer_type_node))),
+ TYPE_MODE (sizetype),
+ GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ /* Return the address of the va_list constructor, but don't put it in a
+ register. Doing so would fail when not optimizing and produce worse
+ code when optimizing. */
+ return XEXP (block, 0);
+ }
}
/* This page contains routines that are used to determine what the function
@@ -1202,43 +2988,203 @@ alpha_builtin_saveregs (arglist)
/* Compute the size of the save area in the stack. */
+/* These variables are used for communication between the following functions.
+ They indicate various things about the current function being compiled
+ that are used to tell what kind of prologue, epilogue, and procedure
+ descriptor to generate. */
+
+/* Nonzero if we need a stack procedure. */
+static int vms_is_stack_procedure;
+
+/* Register number (either FP or SP) that is used to unwind the frame. */
+static int vms_unwind_regno;
+
+/* Register number used to save FP. We need not have one for RA since
+ we don't modify it for register procedures. This is only defined
+ for register frame procedures. */
+static int vms_save_fp_regno;
+
+/* Register number used to reference objects off our PV. */
+static int vms_base_regno;
+
+/* Compute register masks for saved registers. */
+
+static void
+alpha_sa_mask (imaskP, fmaskP)
+ unsigned long *imaskP;
+ unsigned long *fmaskP;
+{
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ int i;
+
+#ifdef ASM_OUTPUT_MI_THUNK
+ if (!current_function_is_thunk)
+#endif
+ {
+ if (TARGET_OPEN_VMS && vms_is_stack_procedure)
+ imask |= (1L << HARD_FRAME_POINTER_REGNUM);
+
+ /* One for every register we have to save. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i]
+ && regs_ever_live[i] && i != REG_RA)
+ {
+ if (i < 32)
+ imask |= (1L << i);
+ else
+ fmask |= (1L << (i - 32));
+ }
+
+ if (imask || fmask || alpha_ra_ever_killed ())
+ imask |= (1L << REG_RA);
+ }
+
+ *imaskP = imask;
+ *fmaskP = fmask;
+}
+
int
alpha_sa_size ()
{
- int size = 0;
+ int sa_size = 0;
int i;
- for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i])
- size++;
+#ifdef ASM_OUTPUT_MI_THUNK
+ if (current_function_is_thunk)
+ sa_size = 0;
+ else
+#endif
+ {
+ /* One for every register we have to save. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (! fixed_regs[i] && ! call_used_regs[i]
+ && regs_ever_live[i] && i != REG_RA)
+ sa_size++;
+ }
- /* If some registers were saved but not reg 26, reg 26 must also
- be saved, so leave space for it. */
- if (size != 0 && ! regs_ever_live[26])
- size++;
+ if (TARGET_OPEN_VMS)
+ {
+ /* Start by assuming we can use a register procedure if we don't
+ make any calls (REG_RA not used) and don't need to save any
+ registers, and a stack procedure if we do. */
+ vms_is_stack_procedure = sa_size != 0 || alpha_ra_ever_killed ();
+
+ /* Decide whether to refer to objects off our PV via FP or PV.
+ If we need FP for something else or if we receive a nonlocal
+ goto (which expects PV to contain the value), we must use PV.
+ Otherwise, start by assuming we can use FP. */
+ vms_base_regno = (frame_pointer_needed
+ || current_function_has_nonlocal_label
+ || vms_is_stack_procedure
+ || current_function_outgoing_args_size
+ ? REG_PV : HARD_FRAME_POINTER_REGNUM);
+
+ /* If we want to copy PV into FP, we need to find some register
+ in which to save FP. */
+
+ vms_save_fp_regno = -1;
+ if (vms_base_regno == HARD_FRAME_POINTER_REGNUM)
+ for (i = 0; i < 32; i++)
+ if (! fixed_regs[i] && call_used_regs[i] && ! regs_ever_live[i])
+ vms_save_fp_regno = i;
+
+ if (vms_save_fp_regno == -1)
+ vms_base_regno = REG_PV, vms_is_stack_procedure = 1;
+
+ /* Stack unwinding should be done via FP unless we use it for PV. */
+ vms_unwind_regno = (vms_base_regno == REG_PV
+ ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
+
+ /* If this is a stack procedure, allow space for saving FP and RA. */
+ if (vms_is_stack_procedure)
+ sa_size += 2;
+ }
+ else
+ {
+ /* If some registers were saved but not RA, RA must also be saved,
+ so leave space for it. */
+ if (sa_size != 0 || alpha_ra_ever_killed ())
+ sa_size++;
+
+ /* Our size must be even (multiple of 16 bytes). */
+ if (sa_size & 1)
+ sa_size++;
+ }
- /* Our size must be even (multiple of 16 bytes). */
- if (size & 1)
- size ++;
+ return sa_size * 8;
+}
- return size * 8;
+int
+alpha_pv_save_size ()
+{
+ alpha_sa_size ();
+ return vms_is_stack_procedure ? 8 : 0;
}
-/* Return 1 if this function can directly return via $26. */
+int
+alpha_using_fp ()
+{
+ alpha_sa_size ();
+ return vms_unwind_regno == HARD_FRAME_POINTER_REGNUM;
+}
int
-direct_return ()
+vms_valid_decl_attribute_p (decl, attributes, identifier, args)
+ tree decl ATTRIBUTE_UNUSED;
+ tree attributes ATTRIBUTE_UNUSED;
+ tree identifier;
+ tree args;
{
- return (reload_completed && alpha_sa_size () == 0
- && get_frame_size () == 0
- && current_function_outgoing_args_size == 0
- && current_function_pretend_args_size == 0);
+ if (is_attribute_p ("overlaid", identifier))
+ return (args == NULL_TREE);
+ return 0;
+}
+
+static int
+alpha_does_function_need_gp ()
+{
+ rtx insn;
+
+ /* We never need a GP for Windows/NT or VMS. */
+ if (TARGET_WINDOWS_NT || TARGET_OPEN_VMS)
+ return 0;
+
+#ifdef TARGET_PROFILING_NEEDS_GP
+ if (profile_flag)
+ return 1;
+#endif
+
+#ifdef ASM_OUTPUT_MI_THUNK
+ if (current_function_is_thunk)
+ return 1;
+#endif
+
+ /* If we need a GP (we have a LDSYM or JSR insn), the prologue must load it.
+ Even if we are a static function, we still need to do this in case
+ our address is taken and passed to something like qsort. */
+
+ push_topmost_sequence ();
+ insn = get_insns ();
+ pop_topmost_sequence ();
+
+ for (; insn; insn = NEXT_INSN (insn))
+ if (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
+ && GET_CODE (PATTERN (insn)) != USE
+ && GET_CODE (PATTERN (insn)) != CLOBBER)
+ {
+ enum attr_type type = get_attr_type (insn);
+ if (type == TYPE_LDSYM || type == TYPE_JSR)
+ return 1;
+ }
+
+ return 0;
}
/* Write a version stamp. Don't write anything if we are running as a
cross-compiler. Otherwise, use the versions in /usr/include/stamp.h. */
-#if !defined(CROSS_COMPILE) && !defined(_WIN32)
+#ifdef HAVE_STAMP_H
#include <stamp.h>
#endif
@@ -1251,145 +3197,58 @@ alpha_write_verstamp (file)
#endif
}
-/* Write code to add constant C to register number IN_REG (possibly 31)
- and put the result into OUT_REG. Use TEMP_REG as a scratch register;
- usually this will be OUT_REG, but should not be if OUT_REG is
- STACK_POINTER_REGNUM, since it must be updated in a single instruction.
- Write the code to FILE. */
-
-static void
-add_long_const (file, c, in_reg, out_reg, temp_reg)
- FILE *file;
- HOST_WIDE_INT c;
- int in_reg, out_reg, temp_reg;
-{
- HOST_WIDE_INT low = (c & 0xffff) - 2 * (c & 0x8000);
- HOST_WIDE_INT tmp1 = c - low;
- HOST_WIDE_INT high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
- HOST_WIDE_INT extra = 0;
-
- /* We don't have code to write out constants larger than 32 bits. */
-#if HOST_BITS_PER_LONG_INT == 64
- if ((unsigned HOST_WIDE_INT) c >> 32 != 0)
- abort ();
-#endif
-
- /* If HIGH will be interpreted as negative, we must adjust it to do two
- ldha insns. Note that we will never be building a negative constant
- here. */
-
- if (high & 0x8000)
- {
- extra = 0x4000;
- tmp1 -= 0x40000000;
- high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000);
- }
-
- if (low != 0)
- {
- int result_reg = (extra == 0 && high == 0) ? out_reg : temp_reg;
-
- if (low >= 0 && low < 255)
- fprintf (file, "\taddq $%d,%d,$%d\n", in_reg, low, result_reg);
- else
- fprintf (file, "\tlda $%d,%d($%d)\n", result_reg, low, in_reg);
-
- in_reg = result_reg;
- }
+/* Write function prologue. */
- if (extra)
- {
- int result_reg = (high == 0) ? out_reg : temp_reg;
+/* On VMS we have two kinds of functions:
- fprintf (file, "\tldah $%d,%d($%d)\n", result_reg, extra, in_reg);
- in_reg = result_reg;
- }
+ - stack frame (PROC_STACK)
+ these are 'normal' functions with local variables that
+ call other functions
+ - register frame (PROC_REGISTER)
+ keeps all data in registers, needs no stack
- if (high)
- fprintf (file, "\tldah $%d,%d($%d)\n", out_reg, high, in_reg);
-}
+ We must pass this to the assembler so it can generate the
+ proper pdsc (procedure descriptor).
+ This is done with the '.pdesc' command.
-/* Write function prologue. */
+ On non-VMS systems, we don't really differentiate between the two, as we can
+ simply allocate stack without saving registers. */
void
-output_prolog (file, size)
- FILE *file;
- int size;
-{
- HOST_WIDE_INT out_args_size
- = ALPHA_ROUND (current_function_outgoing_args_size);
- HOST_WIDE_INT sa_size = alpha_sa_size ();
- HOST_WIDE_INT frame_size
- = (out_args_size + sa_size
- + ALPHA_ROUND (size + current_function_pretend_args_size));
- HOST_WIDE_INT reg_offset = out_args_size;
- HOST_WIDE_INT start_reg_offset = reg_offset;
- HOST_WIDE_INT actual_start_reg_offset = start_reg_offset;
- int int_reg_save_area_size = 0;
- rtx insn;
- unsigned reg_mask = 0;
+alpha_expand_prologue ()
+{
+ /* Registers to save. */
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ HOST_WIDE_INT frame_size;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ rtx sa_reg;
int i;
- /* Ecoff can handle multiple .file directives, so put out file and lineno.
- We have to do that before the .ent directive as we cannot switch
- files within procedures with native ecoff because line numbers are
- linked to procedure descriptors.
- Outputting the lineno helps debugging of one line functions as they
- would otherwise get no line number at all. Please note that we would
- like to put out last_linenum from final.c, but it is not accessible. */
-
- if (write_symbols == SDB_DEBUG)
- {
- ASM_OUTPUT_SOURCE_FILENAME (file,
- DECL_SOURCE_FILE (current_function_decl));
- if (debug_info_level != DINFO_LEVEL_TERSE)
- ASM_OUTPUT_SOURCE_LINE (file,
- DECL_SOURCE_LINE (current_function_decl));
- }
-
- /* The assembly language programmer's guide states that the second argument
- to the .ent directive, the lex_level, is ignored by the assembler,
- so we might as well omit it. */
-
- fprintf (file, "\t.ent ");
- assemble_name (file, alpha_function_name);
- fprintf (file, "\n");
- ASM_OUTPUT_LABEL (file, alpha_function_name);
- inside_function = TRUE;
-
- /* Set up offsets to alpha virtual arg/local debugging pointer. */
+ sa_size = alpha_sa_size ();
- alpha_auto_offset = -frame_size + current_function_pretend_args_size;
- alpha_arg_offset = -frame_size + 48;
-
- /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first.
- Even if we are a static function, we still need to do this in case
- our address is taken and passed to something like qsort.
-
- We never need a GP for Windows/NT. */
-
- alpha_function_needs_gp = 0;
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- if ((GET_CODE (insn) == CALL_INSN)
- || (GET_RTX_CLASS (GET_CODE (insn)) == 'i'
- && GET_CODE (PATTERN (insn)) != USE
- && GET_CODE (PATTERN (insn)) != CLOBBER
- && (get_attr_type (insn) == TYPE_LDSYM
- || get_attr_type (insn) == TYPE_ISUBR)))
- {
- alpha_function_needs_gp = 1;
- break;
- }
+ frame_size = get_frame_size ();
+ if (TARGET_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (vms_is_stack_procedure ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
- if (WINDOWS_NT == 0)
- {
- if (alpha_function_needs_gp)
- fprintf (file, "\tldgp $29,0($27)\n");
+ if (TARGET_OPEN_VMS)
+ reg_offset = 8;
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
- /* Put a label after the GP load so we can enter the function at it. */
- assemble_name (file, alpha_function_name);
- fprintf (file, "..ng:\n");
- }
+ alpha_sa_mask (&imask, &fmask);
/* Adjust the stack by the frame size. If the frame size is > 4096
bytes, we need to be sure we probe somewhere in the first and last
@@ -1400,28 +3259,30 @@ output_prolog (file, size)
Note that we are only allowed to adjust sp once in the prologue. */
- if (frame_size < 32768)
+ if (frame_size <= 32768)
{
if (frame_size > 4096)
{
int probed = 4096;
- fprintf (file, "\tstq $31,-%d($30)\n", probed);
-
- while (probed + 8192 < frame_size)
- fprintf (file, "\tstq $31,-%d($30)\n", probed += 8192);
+ do
+ emit_insn (gen_probe_stack (GEN_INT (-probed)));
+ while ((probed += 8192) < frame_size);
/* We only have to do this probe if we aren't saving registers. */
if (sa_size == 0 && probed + 4096 < frame_size)
- fprintf (file, "\tstq $31,-%d($30)\n", frame_size);
+ emit_insn (gen_probe_stack (GEN_INT (-frame_size)));
}
if (frame_size != 0)
- fprintf (file, "\tlda $30,-%d($30)\n", frame_size);
+ {
+ emit_move_insn (stack_pointer_rtx,
+ plus_constant (stack_pointer_rtx, -frame_size));
+ }
}
else
{
- /* Here we generate code to set R4 to SP + 4096 and set R5 to the
+ /* Here we generate code to set R22 to SP + 4096 and set R23 to the
number of 8192 byte blocks to probe. We then probe each block
in the loop and then set SP to the proper location. If the
amount remaining is > 4096, we have to do one more probe if we
@@ -1429,182 +3290,538 @@ output_prolog (file, size)
HOST_WIDE_INT blocks = (frame_size + 4096) / 8192;
HOST_WIDE_INT leftover = frame_size + 4096 - blocks * 8192;
+ rtx ptr = gen_rtx_REG (DImode, 22);
+ rtx count = gen_rtx_REG (DImode, 23);
- add_long_const (file, blocks, 31, 5, 5);
+ emit_move_insn (count, GEN_INT (blocks));
+ emit_move_insn (ptr, plus_constant (stack_pointer_rtx, 4096));
- fprintf (file, "\tlda $4,4096($30)\n");
+ /* Because of the difficulty in emitting a new basic block this
+ late in the compilation, generate the loop as a single insn. */
+ emit_insn (gen_prologue_stack_probe_loop (count, ptr));
- assemble_name (file, alpha_function_name);
- fprintf (file, "..sc:\n");
+ if (leftover > 4096 && sa_size == 0)
+ {
+ rtx last = gen_rtx_MEM (DImode, plus_constant (ptr, -leftover));
+ MEM_VOLATILE_P (last) = 1;
+ emit_move_insn (last, const0_rtx);
+ }
- fprintf (file, "\tstq $31,-8192($4)\n");
- fprintf (file, "\tsubq $5,1,$5\n");
- fprintf (file, "\tlda $4,-8192($4)\n");
+ emit_move_insn (stack_pointer_rtx, plus_constant (ptr, -leftover));
+ }
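+
+ /* As a worked example with made-up numbers: for frame_size == 100000,
+ blocks == (100000 + 4096) / 8192 == 12 and leftover == 5792, so the
+ loop probes twelve 8192-byte blocks stepping R22 downward and the
+ final SP update leaves it exactly 100000 bytes below its old value. */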
- fprintf (file, "\tbne $5,");
- assemble_name (file, alpha_function_name);
- fprintf (file, "..sc\n");
+ /* Cope with very large offsets to the register save area. */
+ sa_reg = stack_pointer_rtx;
+ if (reg_offset + sa_size > 0x8000)
+ {
+ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT bias;
- if (leftover > 4096 && sa_size == 0)
- fprintf (file, "\tstq $31,-%d($4)\n", leftover);
+ if (low + sa_size <= 0x8000)
+ bias = reg_offset - low, reg_offset = low;
+ else
+ bias = reg_offset, reg_offset = 0;
- fprintf (file, "\tlda $30,-%d($4)\n", leftover);
+ sa_reg = gen_rtx_REG (DImode, 24);
+ emit_move_insn (sa_reg, plus_constant (stack_pointer_rtx, bias));
}
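+
+ /* E.g. (hypothetical numbers) with reg_offset == 0x9000 and sa_size ==
+ 0x100, low works out to -0x7000, which still fits in a 16-bit
+ displacement, so we bias R24 to SP + 0x10000 and address the save
+ area at negative offsets from it. */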
-
- /* Describe our frame. */
- fprintf (file, "\t.frame $%d,%d,$26,%d\n",
- (frame_pointer_needed
- ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM),
- frame_size, current_function_pretend_args_size);
- /* Save register 26 if any other register needs to be saved. */
- if (sa_size != 0)
+ /* Save regs in stack order, beginning with the VMS PV. */
+ if (TARGET_OPEN_VMS && vms_is_stack_procedure)
{
- reg_mask |= 1 << 26;
- fprintf (file, "\tstq $26,%d($30)\n", reg_offset);
+ emit_move_insn (gen_rtx_MEM (DImode, stack_pointer_rtx),
+ gen_rtx_REG (DImode, REG_PV));
+ }
+
+ /* Save register RA next. */
+ if (imask & (1L << REG_RA))
+ {
+ emit_move_insn (gen_rtx_MEM (DImode, plus_constant (sa_reg, reg_offset)),
+ gen_rtx_REG (DImode, REG_RA));
+ imask &= ~(1L << REG_RA);
reg_offset += 8;
- int_reg_save_area_size += 8;
}
- /* Now save any other used integer registers required to be saved. */
+ /* Now save any other registers required to be saved. */
for (i = 0; i < 32; i++)
- if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i] && i != 26)
+ if (imask & (1L << i))
{
- reg_mask |= 1 << i;
- fprintf (file, "\tstq $%d,%d($30)\n", i, reg_offset);
+ emit_move_insn (gen_rtx_MEM (DImode,
+ plus_constant (sa_reg, reg_offset)),
+ gen_rtx_REG (DImode, i));
reg_offset += 8;
- int_reg_save_area_size += 8;
}
- /* Print the register mask and do floating-point saves. */
- if (reg_mask)
- fprintf (file, "\t.mask 0x%x,%d\n", reg_mask,
- actual_start_reg_offset - frame_size);
-
- start_reg_offset = reg_offset;
- reg_mask = 0;
-
for (i = 0; i < 32; i++)
- if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
- && regs_ever_live[i + 32])
+ if (fmask & (1L << i))
{
- reg_mask |= 1 << i;
- fprintf (file, "\tstt $f%d,%d($30)\n", i, reg_offset);
+ emit_move_insn (gen_rtx_MEM (DFmode,
+ plus_constant (sa_reg, reg_offset)),
+ gen_rtx_REG (DFmode, i+32));
reg_offset += 8;
}
- /* Print the floating-point mask, if we've saved any fp register. */
- if (reg_mask)
- fprintf (file, "\t.fmask 0x%x,%d\n", reg_mask,
- actual_start_reg_offset - frame_size + int_reg_save_area_size);
+ if (TARGET_OPEN_VMS)
+ {
+ if (!vms_is_stack_procedure)
+ {
+ /* Register frame procedures save the fp. */
+ emit_move_insn (gen_rtx_REG (DImode, vms_save_fp_regno),
+ hard_frame_pointer_rtx);
+ }
+
+ if (vms_base_regno != REG_PV)
+ emit_move_insn (gen_rtx_REG (DImode, vms_base_regno),
+ gen_rtx_REG (DImode, REG_PV));
- /* If we need a frame pointer, set it from the stack pointer. Note that
- this must always be the last instruction in the prologue. */
- if (frame_pointer_needed)
- fprintf (file, "\tbis $30,$30,$15\n");
+ if (vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
+ {
+ emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ }
- /* End the prologue and say if we used gp. */
- fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
+ /* If we have to allocate space for outgoing args, do it now. */
+ if (current_function_outgoing_args_size != 0)
+ {
+ emit_move_insn (stack_pointer_rtx,
+ plus_constant (hard_frame_pointer_rtx,
+ - ALPHA_ROUND (current_function_outgoing_args_size)));
+ }
+ }
+ else
+ {
+ /* If we need a frame pointer, set it from the stack pointer. */
+ if (frame_pointer_needed)
+ {
+ if (TARGET_CAN_FAULT_IN_PROLOGUE)
+ emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
+ else
+ {
+ /* This must always be the last instruction in the
+ prologue, thus we emit a special move + clobber. */
+ emit_insn (gen_init_fp (hard_frame_pointer_rtx,
+ stack_pointer_rtx, sa_reg));
+ }
+ }
+ }
+
+ /* The ABIs for VMS and OSF/1 say that while we can schedule insns into
+ the prologue, for exception handling reasons, we cannot do this for
+ any insn that might fault. We could prevent this for mems with a
+ (clobber:BLK (scratch)), but this doesn't work for fp insns. So we
+ have to prevent all such scheduling with a blockage.
+
+ Linux, on the other hand, never bothered to implement OSF/1's
+ exception handling, and so doesn't care about such things. Anyone
+ planning to use dwarf2 frame-unwind info can also omit the blockage. */
+
+ if (! TARGET_CAN_FAULT_IN_PROLOGUE)
+ emit_insn (gen_blockage ());
}
-/* Write function epilogue. */
+/* Output the textual info surrounding the prologue. */
void
-output_epilog (file, size)
+alpha_start_function (file, fnname, decl)
FILE *file;
- int size;
-{
- rtx insn = get_last_insn ();
- HOST_WIDE_INT out_args_size
- = ALPHA_ROUND (current_function_outgoing_args_size);
- HOST_WIDE_INT sa_size = alpha_sa_size ();
- HOST_WIDE_INT frame_size
- = (out_args_size + sa_size
- + ALPHA_ROUND (size + current_function_pretend_args_size));
- HOST_WIDE_INT reg_offset = out_args_size;
- HOST_WIDE_INT frame_size_from_reg_save = frame_size - reg_offset;
- int restore_fp
- = frame_pointer_needed && regs_ever_live[HARD_FRAME_POINTER_REGNUM];
+ char *fnname;
+ tree decl ATTRIBUTE_UNUSED;
+{
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ HOST_WIDE_INT frame_size;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ char *entry_label = (char *) alloca (strlen (fnname) + 6);
int i;
- /* If the last insn was a BARRIER, we don't have to write anything except
- the .end pseudo-op. */
- if (GET_CODE (insn) == NOTE)
- insn = prev_nonnote_insn (insn);
- if (insn == 0 || GET_CODE (insn) != BARRIER)
+ sa_size = alpha_sa_size ();
+
+ frame_size = get_frame_size ();
+ if (TARGET_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (vms_is_stack_procedure ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
+
+ if (TARGET_OPEN_VMS)
+ reg_offset = 8;
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
+
+ alpha_sa_mask (&imask, &fmask);
+
+ /* Ecoff can handle multiple .file directives, so put out file and lineno.
+ We have to do that before the .ent directive as we cannot switch
+ files within procedures with native ecoff because line numbers are
+ linked to procedure descriptors.
+ Outputting the lineno helps debugging of one line functions as they
+ would otherwise get no line number at all. Please note that we would
+ like to put out last_linenum from final.c, but it is not accessible. */
+
+ if (write_symbols == SDB_DEBUG)
+ {
+ ASM_OUTPUT_SOURCE_FILENAME (file,
+ DECL_SOURCE_FILE (current_function_decl));
+ if (debug_info_level != DINFO_LEVEL_TERSE)
+ ASM_OUTPUT_SOURCE_LINE (file,
+ DECL_SOURCE_LINE (current_function_decl));
+ }
+
+ /* Issue function start and label. */
+ if (TARGET_OPEN_VMS || !flag_inhibit_size_directive)
+ {
+ fputs ("\t.ent ", file);
+ assemble_name (file, fnname);
+ putc ('\n', file);
+ }
+
+ strcpy (entry_label, fnname);
+ if (TARGET_OPEN_VMS)
+ strcat (entry_label, "..en");
+ ASM_OUTPUT_LABEL (file, entry_label);
+ inside_function = TRUE;
+
+ if (TARGET_OPEN_VMS)
+ fprintf (file, "\t.base $%d\n", vms_base_regno);
+
+ if (!TARGET_OPEN_VMS && TARGET_IEEE_CONFORMANT
+ && !flag_inhibit_size_directive)
+ {
+ /* Set flags in procedure descriptor to request IEEE-conformant
+ math-library routines. The value we set it to is PDSC_EXC_IEEE
+ (/usr/include/pdsc.h). */
+ fputs ("\t.eflag 48\n", file);
+ }
+
+ /* Set up offsets to alpha virtual arg/local debugging pointer. */
+ alpha_auto_offset = -frame_size + current_function_pretend_args_size;
+ alpha_arg_offset = -frame_size + 48;
+
+ /* Describe our frame. If the frame size does not fit in a 32-bit integer,
+ print it as zero to avoid an assembler error. We won't be
+ properly describing such a frame, but that's the best we can do. */
+ if (TARGET_OPEN_VMS)
{
- int fp_offset = 0;
+ fprintf (file, "\t.frame $%d,", vms_unwind_regno);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ frame_size >= (1l << 31) ? 0 : frame_size);
+ fputs (",$26,", file);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, reg_offset);
+ fputs ("\n", file);
+ }
+ else if (!flag_inhibit_size_directive)
+ {
+ fprintf (file, "\t.frame $%d,",
+ (frame_pointer_needed
+ ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ frame_size >= (1l << 31) ? 0 : frame_size);
+ fprintf (file, ",$26,%d\n", current_function_pretend_args_size);
+ }
+
+ /* Describe which registers were spilled. */
+ if (TARGET_OPEN_VMS)
+ {
+ if (imask)
+ /* ??? Does VMS care if mask contains RA? The old code didn't
+ set it, so I don't here. */
+ fprintf (file, "\t.mask 0x%lx,0\n", imask & ~(1L << REG_RA));
+ if (fmask)
+ fprintf (file, "\t.fmask 0x%lx,0\n", fmask);
+ if (!vms_is_stack_procedure)
+ fprintf (file, "\t.fp_save $%d\n", vms_save_fp_regno);
+ }
+ else if (!flag_inhibit_size_directive)
+ {
+ if (imask)
+ {
+ fprintf (file, "\t.mask 0x%lx,", imask);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
+ putc ('\n', file);
+
+ for (i = 0; i < 32; ++i)
+ if (imask & (1L << i))
+ reg_offset += 8;
+ }
+
+ if (fmask)
+ {
+ fprintf (file, "\t.fmask 0x%lx,", fmask);
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC,
+ frame_size >= (1l << 31) ? 0 : reg_offset - frame_size);
+ putc ('\n', file);
+ }
+ }
+
+ /* Emit GP related things. It is rather unfortunate that the alignment
+ issues surrounding a CODE_LABEL force us to emit the label in
+ plain text. */
+ if (!TARGET_OPEN_VMS && !TARGET_WINDOWS_NT)
+ {
+ alpha_function_needs_gp = alpha_does_function_need_gp ();
+ if (alpha_function_needs_gp)
+ fputs ("\tldgp $29,0($27)\n", file);
+
+ putc ('$', file);
+ assemble_name (file, fnname);
+ fputs ("..ng:\n", file);
+ }
+
+#ifdef OPEN_VMS
+ /* Ifdef'ed because readonly_section and link_section are only
+ available then. */
+ readonly_section ();
+ fprintf (file, "\t.align 3\n");
+ assemble_name (file, fnname); fputs ("..na:\n", file);
+ fputs ("\t.ascii \"", file);
+ assemble_name (file, fnname);
+ fputs ("\\0\"\n", file);
+
+ link_section ();
+ fprintf (file, "\t.align 3\n");
+ fputs ("\t.name ", file);
+ assemble_name (file, fnname);
+ fputs ("..na\n", file);
+ ASM_OUTPUT_LABEL (file, fnname);
+ fprintf (file, "\t.pdesc ");
+ assemble_name (file, fnname);
+ fprintf (file, "..en,%s\n", vms_is_stack_procedure ? "stack" : "reg");
+ alpha_need_linkage (fnname, 1);
+ text_section ();
+#endif
+}
+
+/* Emit the .prologue note at the scheduled end of the prologue. */
+
+void
+output_end_prologue (file)
+ FILE *file;
+{
+ if (TARGET_OPEN_VMS)
+ fputs ("\t.prologue\n", file);
+ else if (TARGET_WINDOWS_NT)
+ fputs ("\t.prologue 0\n", file);
+ else if (!flag_inhibit_size_directive)
+ fprintf (file, "\t.prologue %d\n", alpha_function_needs_gp);
+}
+
+/* Write function epilogue. */
+
+void
+alpha_expand_epilogue ()
+{
+ /* Registers to save. */
+ unsigned long imask = 0;
+ unsigned long fmask = 0;
+ /* Stack space needed for pushing registers clobbered by us. */
+ HOST_WIDE_INT sa_size;
+ /* Complete stack size needed. */
+ HOST_WIDE_INT frame_size;
+ /* Offset from base reg to register save area. */
+ HOST_WIDE_INT reg_offset;
+ int fp_is_frame_pointer, fp_offset;
+ rtx sa_reg, sa_reg_exp = NULL;
+ rtx sp_adj1, sp_adj2;
+ int i;
+
+ sa_size = alpha_sa_size ();
+
+ frame_size = get_frame_size ();
+ if (TARGET_OPEN_VMS)
+ frame_size = ALPHA_ROUND (sa_size
+ + (vms_is_stack_procedure ? 8 : 0)
+ + frame_size
+ + current_function_pretend_args_size);
+ else
+ frame_size = (ALPHA_ROUND (current_function_outgoing_args_size)
+ + sa_size
+ + ALPHA_ROUND (frame_size
+ + current_function_pretend_args_size));
+
+ if (TARGET_OPEN_VMS)
+ reg_offset = 8;
+ else
+ reg_offset = ALPHA_ROUND (current_function_outgoing_args_size);
+ alpha_sa_mask (&imask, &fmask);
+
+ fp_is_frame_pointer = ((TARGET_OPEN_VMS && vms_is_stack_procedure)
+ || (!TARGET_OPEN_VMS && frame_pointer_needed));
+
+ if (sa_size)
+ {
/* If we have a frame pointer, restore SP from it. */
- if (frame_pointer_needed)
- fprintf (file, "\tbis $15,$15,$30\n");
+ if ((TARGET_OPEN_VMS
+ && vms_unwind_regno == HARD_FRAME_POINTER_REGNUM)
+ || (!TARGET_OPEN_VMS && frame_pointer_needed))
+ {
+ emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
+ }
- /* Restore all the registers, starting with the return address
- register. */
- if (sa_size != 0)
+ /* Cope with very large offsets to the register save area. */
+ sa_reg = stack_pointer_rtx;
+ if (reg_offset + sa_size > 0x8000)
{
- fprintf (file, "\tldq $26,%d($30)\n", reg_offset);
- reg_offset += 8;
+ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000;
+ HOST_WIDE_INT bias;
+
+ if (low + sa_size <= 0x8000)
+ bias = reg_offset - low, reg_offset = low;
+ else
+ bias = reg_offset, reg_offset = 0;
+
+ sa_reg = gen_rtx_REG (DImode, 22);
+ sa_reg_exp = plus_constant (stack_pointer_rtx, bias);
+
+ emit_move_insn (sa_reg, sa_reg_exp);
}
+
+ /* Restore registers in order, excepting a true frame pointer. */
- /* Now restore any other used integer registers that that we saved,
- except for FP if it is being used as FP, since it must be
- restored last. */
+ emit_move_insn (gen_rtx_REG (DImode, REG_RA),
+ gen_rtx_MEM (DImode, plus_constant(sa_reg, reg_offset)));
+ reg_offset += 8;
+ imask &= ~(1L << REG_RA);
- for (i = 0; i < 32; i++)
- if (! fixed_regs[i] && ! call_used_regs[i] && regs_ever_live[i]
- && i != 26)
+ for (i = 0; i < 32; ++i)
+ if (imask & (1L << i))
{
- if (i == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
+ if (i == HARD_FRAME_POINTER_REGNUM && fp_is_frame_pointer)
fp_offset = reg_offset;
else
- fprintf (file, "\tldq $%d,%d($30)\n", i, reg_offset);
+ {
+ emit_move_insn (gen_rtx_REG (DImode, i),
+ gen_rtx_MEM (DImode,
+ plus_constant(sa_reg,
+ reg_offset)));
+ }
reg_offset += 8;
}
- for (i = 0; i < 32; i++)
- if (! fixed_regs[i + 32] && ! call_used_regs[i + 32]
- && regs_ever_live[i + 32])
+ for (i = 0; i < 32; ++i)
+ if (fmask & (1L << i))
{
- fprintf (file, "\tldt $f%d,%d($30)\n", i, reg_offset);
+ emit_move_insn (gen_rtx_REG (DFmode, i+32),
+ gen_rtx_MEM (DFmode,
+ plus_constant(sa_reg, reg_offset)));
reg_offset += 8;
}
+ }
- /* If the stack size is large and we have a frame pointer, compute the
- size of the stack into a register because the old FP restore, stack
- pointer adjust, and return are required to be consecutive
- instructions. */
- if (frame_size > 32767 && restore_fp)
- add_long_const (file, frame_size, 31, 1, 1);
-
- /* If we needed a frame pointer and we have to restore it, do it
- now. This must be done in one instruction immediately
- before the SP update. */
- if (restore_fp && fp_offset)
- fprintf (file, "\tldq $15,%d($30)\n", fp_offset);
-
- /* Now update the stack pointer, if needed. Only one instruction must
- modify the stack pointer. It must be the last instruction in the
- sequence and must be an ADDQ or LDA instruction. If the frame
- pointer was loaded above, we may only put one instruction here. */
-
- if (frame_size > 32768 && restore_fp)
- fprintf (file, "\taddq $1,$30,$30\n");
+ if (frame_size)
+ {
+ /* If the stack size is large, begin computation into a temporary
+ register so as not to interfere with a potential fp restore,
+ which must be consecutive with an SP restore. */
+ if (frame_size < 32768)
+ {
+ sp_adj1 = stack_pointer_rtx;
+ sp_adj2 = GEN_INT (frame_size);
+ }
+ else if (frame_size < 0x40007fffL)
+ {
+ int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000;
+
+ sp_adj2 = plus_constant (stack_pointer_rtx, frame_size - low);
+ if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2))
+ sp_adj1 = sa_reg;
+ else
+ {
+ sp_adj1 = gen_rtx_REG (DImode, 23);
+ emit_move_insn (sp_adj1, sp_adj2);
+ }
+ sp_adj2 = GEN_INT (low);
+ }
else
- add_long_const (file, frame_size, 30, 30, 1);
+ {
+ sp_adj2 = gen_rtx_REG (DImode, 23);
+ sp_adj1 = alpha_emit_set_const (sp_adj2, DImode, frame_size, 3);
+ if (!sp_adj1)
+ {
+ /* We can't drop new things to memory this late, afaik,
+ so build it up by pieces. */
+#if HOST_BITS_PER_WIDE_INT == 64
+ sp_adj1 = alpha_emit_set_long_const (sp_adj2, frame_size);
+ if (!sp_adj1)
+ abort ();
+#else
+ abort ();
+#endif
+ }
+ sp_adj2 = stack_pointer_rtx;
+ }
+
+ /* From now on, things must be in order. So emit blockages. */
+
+ /* Restore the frame pointer. */
+ if (fp_is_frame_pointer)
+ {
+ emit_insn (gen_blockage ());
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_MEM (DImode,
+ plus_constant(sa_reg, fp_offset)));
+ }
+ else if (TARGET_OPEN_VMS)
+ {
+ emit_insn (gen_blockage ());
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ }
- /* Finally return to the caller. */
- fprintf (file, "\tret $31,($26),1\n");
+ /* Restore the stack pointer. */
+ emit_insn (gen_blockage ());
+ emit_move_insn (stack_pointer_rtx,
+ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2));
+ }
+ else
+ {
+ if (TARGET_OPEN_VMS && !vms_is_stack_procedure)
+ {
+ emit_insn (gen_blockage ());
+ emit_move_insn (hard_frame_pointer_rtx,
+ gen_rtx_REG (DImode, vms_save_fp_regno));
+ }
}
+ /* Return. */
+ emit_jump_insn (gen_return_internal ());
+}
+
+/* Output the rest of the textual info surrounding the epilogue. */
+
+void
+alpha_end_function (file, fnname, decl)
+ FILE *file;
+ char *fnname;
+ tree decl ATTRIBUTE_UNUSED;
+{
/* End the function. */
- fprintf (file, "\t.end ");
- assemble_name (file, alpha_function_name);
- fprintf (file, "\n");
+ if (!flag_inhibit_size_directive)
+ {
+ fputs ("\t.end ", file);
+ assemble_name (file, fnname);
+ putc ('\n', file);
+ }
inside_function = FALSE;
- /* Show that we know this function if it is called again. */
- SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
+ /* Show that we know this function if it is called again.
+
+ Don't do this for global functions in object files destined for a
+ shared library because the function may be overridden by the application
+ or other libraries.
+ ??? Is this just ELF? */
+
+ if (!flag_pic || !TREE_PUBLIC (current_function_decl))
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (current_function_decl), 0)) = 1;
}
/* Debugging support. */
@@ -1655,7 +3872,7 @@ alpha_output_filename (stream, name)
fprintf (stream, "\t#@stabs\n");
}
- else if (!TARGET_GAS && write_symbols == DBX_DEBUG)
+ else if (write_symbols == DBX_DEBUG)
{
ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
fprintf (stream, "%s ", ASM_STABS_OP);
@@ -1664,7 +3881,7 @@ alpha_output_filename (stream, name)
}
else if (name != current_function_file
- && strcmp (name, current_function_file) != 0)
+ && strcmp (name, current_function_file) != 0)
{
if (inside_function && ! TARGET_GAS)
fprintf (stream, "\t#.file\t%d ", num_source_filenames);
@@ -1687,7 +3904,7 @@ alpha_output_lineno (stream, line)
FILE *stream;
int line;
{
- if (! TARGET_GAS && write_symbols == DBX_DEBUG)
+ if (write_symbols == DBX_DEBUG)
{
/* mips-tfile doesn't understand .stabd directives. */
++sym_lineno;
@@ -1697,3 +3914,543 @@ alpha_output_lineno (stream, line)
else
fprintf (stream, "\n\t.loc\t%d %d\n", num_source_filenames, line);
}
+
+/* Structure to show the current status of registers and memory. */
+
+struct shadow_summary
+{
+ struct {
+ unsigned long i : 31; /* Mask of int regs */
+ unsigned long fp : 31; /* Mask of fp regs */
+ unsigned long mem : 1; /* mem == imem | fpmem */
+ } used, defd;
+};
+
+static void summarize_insn PROTO((rtx, struct shadow_summary *, int));
+static void alpha_handle_trap_shadows PROTO((rtx));
+
+/* Summarize the effects of expression X on the machine. Update SUM, a pointer
+ to the summary structure. SET is nonzero if the insn is setting the
+ object, otherwise zero. */
+
+static void
+summarize_insn (x, sum, set)
+ rtx x;
+ struct shadow_summary *sum;
+ int set;
+{
+ char *format_ptr;
+ int i, j;
+
+ if (x == 0)
+ return;
+
+ switch (GET_CODE (x))
+ {
+ /* ??? Note that this case would be incorrect if the Alpha had a
+ ZERO_EXTRACT in SET_DEST. */
+ case SET:
+ summarize_insn (SET_SRC (x), sum, 0);
+ summarize_insn (SET_DEST (x), sum, 1);
+ break;
+
+ case CLOBBER:
+ summarize_insn (XEXP (x, 0), sum, 1);
+ break;
+
+ case USE:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ case ASM_OPERANDS:
+ for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--)
+ summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0);
+ break;
+
+ case PARALLEL:
+ for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
+ summarize_insn (XVECEXP (x, 0, i), sum, 0);
+ break;
+
+ case SUBREG:
+ summarize_insn (SUBREG_REG (x), sum, 0);
+ break;
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ unsigned long mask = 1UL << (regno % 32);
+
+ if (regno == 31 || regno == 63)
+ break;
+
+ if (set)
+ {
+ if (regno < 32)
+ sum->defd.i |= mask;
+ else
+ sum->defd.fp |= mask;
+ }
+ else
+ {
+ if (regno < 32)
+ sum->used.i |= mask;
+ else
+ sum->used.fp |= mask;
+ }
+ }
+ break;
+
+ case MEM:
+ if (set)
+ sum->defd.mem = 1;
+ else
+ sum->used.mem = 1;
+
+ /* Find the regs used in memory address computation: */
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ case CONST_INT: case CONST_DOUBLE:
+ case SYMBOL_REF: case LABEL_REF: case CONST:
+ break;
+
+ /* Handle common unary and binary ops for efficiency. */
+ case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
+ case MOD: case UDIV: case UMOD: case AND: case IOR:
+ case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
+ case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
+ case NE: case EQ: case GE: case GT: case LE:
+ case LT: case GEU: case GTU: case LEU: case LTU:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ summarize_insn (XEXP (x, 1), sum, 0);
+ break;
+
+ case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
+ case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
+ case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
+ case SQRT: case FFS:
+ summarize_insn (XEXP (x, 0), sum, 0);
+ break;
+
+ default:
+ format_ptr = GET_RTX_FORMAT (GET_CODE (x));
+ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
+ switch (format_ptr[i])
+ {
+ case 'e':
+ summarize_insn (XEXP (x, i), sum, 0);
+ break;
+
+ case 'E':
+ for (j = XVECLEN (x, i) - 1; j >= 0; j--)
+ summarize_insn (XVECEXP (x, i, j), sum, 0);
+ break;
+
+ case 'i':
+ break;
+
+ default:
+ abort ();
+ }
+ }
+}
+
+/* Ensure a sufficient number of `trapb' insns are in the code when
+ the user requests code with a trap precision of functions or
+ instructions.
+
+ In naive mode, when the user requests a trap-precision of
+ "instruction", a trapb is needed after every instruction that may
+ generate a trap. This ensures that the code is resumption safe but
+ it is also slow.
+
+ When optimizations are turned on, we delay issuing a trapb as long
+ as possible. In this context, a trap shadow is the sequence of
+ instructions that starts with a (potentially) trap generating
+ instruction and extends to the next trapb or call_pal instruction
+ (but GCC never generates call_pal by itself). We can delay (and
+ therefore sometimes omit) a trapb subject to the following
+ conditions:
+
+ (a) On entry to the trap shadow, if any Alpha register or memory
+ location contains a value that is used as an operand value by some
+ instruction in the trap shadow (live on entry), then no instruction
+ in the trap shadow may modify the register or memory location.
+
+ (b) Within the trap shadow, the computation of the base register
+ for a memory load or store instruction may not involve using the
+ result of an instruction that might generate an UNPREDICTABLE
+ result.
+
+ (c) Within the trap shadow, no register may be used more than once
+ as a destination register. (This is to make life easier for the
+ trap-handler.)
+
+ (d) The trap shadow may not include any branch instructions. */
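
To make the bitmask bookkeeping concrete, here is a minimal stand-alone C sketch (an illustration, not part of the patch) of how the shadow_summary masks defined above catch violations of conditions (a) and (c); the register numbers and the driver are invented for the example, only the mask layout and the two tests come from this file.

    #include <stdio.h>

    struct shadow_summary
    {
      struct {
        unsigned long i : 31;    /* mask of integer regs $0-$30 */
        unsigned long fp : 31;   /* mask of fp regs $f0-$f30 */
        unsigned long mem : 1;   /* any memory reference */
      } used, defd;
    };

    int
    main (void)
    {
      struct shadow_summary shadow = { { 0 } }, sum = { { 0 } };

      shadow.defd.i = 1UL << 1;  /* the open shadow already wrote $1 */
      shadow.used.i = 1UL << 2;  /* ... and read $2 on entry */

      sum.defd.i = 1UL << 2;     /* the next insn writes $2 */
      sum.used.i = 1UL << 3;     /* ... and reads $3 */

      if ((sum.defd.i & shadow.defd.i) || (sum.defd.fp & shadow.defd.fp))
        puts ("condition (c) violated: destination reused, close the shadow");

      if ((sum.defd.i & shadow.used.i)
          || (sum.defd.fp & shadow.used.fp)
          || (sum.defd.mem & shadow.used.mem))
        puts ("condition (a) violated: live-on-entry value clobbered, close the shadow");

      return 0;
    }
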
+
+static void
+alpha_handle_trap_shadows (insns)
+ rtx insns;
+{
+ struct shadow_summary shadow;
+ int trap_pending, exception_nesting;
+ rtx i;
+
+ if (alpha_tp == ALPHA_TP_PROG && !flag_exceptions)
+ return;
+
+ trap_pending = 0;
+ exception_nesting = 0;
+ shadow.used.i = 0;
+ shadow.used.fp = 0;
+ shadow.used.mem = 0;
+ shadow.defd = shadow.used;
+
+ for (i = insns; i ; i = NEXT_INSN (i))
+ {
+ if (GET_CODE (i) == NOTE)
+ {
+ switch (NOTE_LINE_NUMBER (i))
+ {
+ case NOTE_INSN_EH_REGION_BEG:
+ exception_nesting++;
+ if (trap_pending)
+ goto close_shadow;
+ break;
+
+ case NOTE_INSN_EH_REGION_END:
+ exception_nesting--;
+ if (trap_pending)
+ goto close_shadow;
+ break;
+
+ case NOTE_INSN_EPILOGUE_BEG:
+ if (trap_pending && alpha_tp >= ALPHA_TP_FUNC)
+ goto close_shadow;
+ break;
+ }
+ }
+ else if (trap_pending)
+ {
+ if (alpha_tp == ALPHA_TP_FUNC)
+ {
+ if (GET_CODE (i) == JUMP_INSN
+ && GET_CODE (PATTERN (i)) == RETURN)
+ goto close_shadow;
+ }
+ else if (alpha_tp == ALPHA_TP_INSN)
+ {
+ if (optimize > 0)
+ {
+ struct shadow_summary sum;
+
+ sum.used.i = 0;
+ sum.used.fp = 0;
+ sum.used.mem = 0;
+ sum.defd = sum.used;
+
+ switch (GET_CODE (i))
+ {
+ case INSN:
+ /* Annoyingly, get_attr_trap will abort on these. */
+ if (GET_CODE (PATTERN (i)) == USE
+ || GET_CODE (PATTERN (i)) == CLOBBER)
+ break;
+
+ summarize_insn (PATTERN (i), &sum, 0);
+
+ if ((sum.defd.i & shadow.defd.i)
+ || (sum.defd.fp & shadow.defd.fp))
+ {
+ /* (c) would be violated */
+ goto close_shadow;
+ }
+
+ /* Combine shadow with summary of current insn: */
+ shadow.used.i |= sum.used.i;
+ shadow.used.fp |= sum.used.fp;
+ shadow.used.mem |= sum.used.mem;
+ shadow.defd.i |= sum.defd.i;
+ shadow.defd.fp |= sum.defd.fp;
+ shadow.defd.mem |= sum.defd.mem;
+
+ if ((sum.defd.i & shadow.used.i)
+ || (sum.defd.fp & shadow.used.fp)
+ || (sum.defd.mem & shadow.used.mem))
+ {
+ /* (a) would be violated (also takes care of (b)) */
+ if (get_attr_trap (i) == TRAP_YES
+ && ((sum.defd.i & sum.used.i)
+ || (sum.defd.fp & sum.used.fp)))
+ abort ();
+
+ goto close_shadow;
+ }
+ break;
+
+ case JUMP_INSN:
+ case CALL_INSN:
+ case CODE_LABEL:
+ goto close_shadow;
+
+ default:
+ abort ();
+ }
+ }
+ else
+ {
+ close_shadow:
+ emit_insn_before (gen_trapb (), i);
+ trap_pending = 0;
+ shadow.used.i = 0;
+ shadow.used.fp = 0;
+ shadow.used.mem = 0;
+ shadow.defd = shadow.used;
+ }
+ }
+ }
+
+ if ((exception_nesting > 0 || alpha_tp >= ALPHA_TP_FUNC)
+ && GET_CODE (i) == INSN
+ && GET_CODE (PATTERN (i)) != USE
+ && GET_CODE (PATTERN (i)) != CLOBBER
+ && get_attr_trap (i) == TRAP_YES)
+ {
+ if (optimize && !trap_pending)
+ summarize_insn (PATTERN (i), &shadow, 0);
+ trap_pending = 1;
+ }
+ }
+}
+
+/* Machine-dependent reorg pass. */
+
+void
+alpha_reorg (insns)
+ rtx insns;
+{
+ alpha_handle_trap_shadows (insns);
+}
+
+
+/* Check a floating-point value for validity for a particular machine mode. */
+
+static char * const float_strings[] =
+{
+ /* These are for FLOAT_VAX. */
+ "1.70141173319264430e+38", /* 2^127 (2^24 - 1) / 2^24 */
+ "-1.70141173319264430e+38",
+ "2.93873587705571877e-39", /* 2^-128 */
+ "-2.93873587705571877e-39",
+ /* These are for the default broken IEEE mode, which traps
+ on infinity or denormal numbers. */
+ "3.402823466385288598117e+38", /* 2^128 (1 - 2^-24) */
+ "-3.402823466385288598117e+38",
+ "1.1754943508222875079687e-38", /* 2^-126 */
+ "-1.1754943508222875079687e-38",
+};
+
+static REAL_VALUE_TYPE float_values[8];
+static int inited_float_values = 0;
+
+int
+check_float_value (mode, d, overflow)
+ enum machine_mode mode;
+ REAL_VALUE_TYPE *d;
+ int overflow ATTRIBUTE_UNUSED;
+{
+
+ if (TARGET_IEEE || TARGET_IEEE_CONFORMANT || TARGET_IEEE_WITH_INEXACT)
+ return 0;
+
+ if (inited_float_values == 0)
+ {
+ int i;
+ for (i = 0; i < 8; i++)
+ float_values[i] = REAL_VALUE_ATOF (float_strings[i], DFmode);
+
+ inited_float_values = 1;
+ }
+
+ if (mode == SFmode)
+ {
+ REAL_VALUE_TYPE r;
+ REAL_VALUE_TYPE *fvptr;
+
+ if (TARGET_FLOAT_VAX)
+ fvptr = &float_values[0];
+ else
+ fvptr = &float_values[4];
+
+ bcopy ((char *) d, (char *) &r, sizeof (REAL_VALUE_TYPE));
+ if (REAL_VALUES_LESS (fvptr[0], r))
+ {
+ bcopy ((char *) &fvptr[0], (char *) d,
+ sizeof (REAL_VALUE_TYPE));
+ return 1;
+ }
+ else if (REAL_VALUES_LESS (r, fvptr[1]))
+ {
+ bcopy ((char *) &fvptr[1], (char *) d,
+ sizeof (REAL_VALUE_TYPE));
+ return 1;
+ }
+ else if (REAL_VALUES_LESS (dconst0, r)
+ && REAL_VALUES_LESS (r, fvptr[2]))
+ {
+ bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
+ return 1;
+ }
+ else if (REAL_VALUES_LESS (r, dconst0)
+ && REAL_VALUES_LESS (fvptr[3], r))
+ {
+ bcopy ((char *) &dconst0, (char *) d, sizeof (REAL_VALUE_TYPE));
+ return 1;
+ }
+ }
+
+ return 0;
+}
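
The following minimal sketch (not part of the patch) restates what check_float_value does for SFmode in the non-IEEE case, using plain doubles: out-of-range constants are pinned to the largest representable magnitude and tiny non-zero constants are flushed to zero. The limits are the IEEE entries of float_strings above; the VAX case works the same way with the first four entries.

    #include <stdio.h>

    static double
    clamp_sf (double d)
    {
      const double max_pos = 3.402823466385288598117e+38;   /* 2^128 * (1 - 2^-24) */
      const double min_pos = 1.1754943508222875079687e-38;  /* 2^-126 */

      if (d > max_pos)
        return max_pos;          /* too large: pin to the maximum */
      if (d < -max_pos)
        return -max_pos;
      if (d > 0.0 && d < min_pos)
        return 0.0;              /* denormal range: flush to zero */
      if (d < 0.0 && d > -min_pos)
        return 0.0;
      return d;                  /* representable: left alone */
    }

    int
    main (void)
    {
      /* Prints 3.40282e+38 0 1.5 */
      printf ("%g %g %g\n", clamp_sf (1e39), clamp_sf (1e-40), clamp_sf (1.5));
      return 0;
    }
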
+
+#if OPEN_VMS
+
+/* Return the VMS argument type corresponding to MODE. */
+
+enum avms_arg_type
+alpha_arg_type (mode)
+ enum machine_mode mode;
+{
+ switch (mode)
+ {
+ case SFmode:
+ return TARGET_FLOAT_VAX ? FF : FS;
+ case DFmode:
+ return TARGET_FLOAT_VAX ? FD : FT;
+ default:
+ return I64;
+ }
+}
+
+/* Return an rtx for an integer representing the VMS Argument Information
+ register value. */
+
+struct rtx_def *
+alpha_arg_info_reg_val (cum)
+ CUMULATIVE_ARGS cum;
+{
+ unsigned HOST_WIDE_INT regval = cum.num_args;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ regval |= ((int) cum.atypes[i]) << (i * 3 + 8);
+
+ return GEN_INT (regval);
+}
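
A minimal sketch (not part of the patch) of the Argument Information register packing performed by alpha_arg_info_reg_val: the low bits hold the argument count and each of the first six arguments contributes a 3-bit type code starting at bit 8. The numeric values given to the enum below are an assumption made for the example; only the packing formula is taken from the function above.

    #include <stdio.h>

    enum avms_arg_type { I64, FF, FD, FS, FT };   /* assumed ordering */

    int
    main (void)
    {
      /* A hypothetical call foo (int, double) with IEEE doubles: two
         arguments, the second of type FT.  */
      enum avms_arg_type atypes[6] = { I64, FT, I64, I64, I64, I64 };
      unsigned long regval = 2;                   /* cum.num_args */
      int i;

      for (i = 0; i < 6; i++)
        regval |= ((unsigned long) atypes[i]) << (i * 3 + 8);

      printf ("AI register value: %#lx\n", regval);   /* prints 0x2002 */
      return 0;
    }
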
+
+/* Structure to collect function names for final output
+ in link section. */
+
+enum links_kind {KIND_UNUSED, KIND_LOCAL, KIND_EXTERN};
+
+
+struct alpha_links {
+ struct alpha_links *next;
+ char *name;
+ enum links_kind kind;
+};
+
+static struct alpha_links *alpha_links_base = 0;
+
+/* Make (or fake) .linkage entry for function call.
+
+ IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
+
+void
+alpha_need_linkage (name, is_local)
+ char *name;
+ int is_local;
+{
+ rtx x;
+ struct alpha_links *lptr, *nptr;
+
+ if (name[0] == '*')
+ name++;
+
+ /* Is this name already defined? */
+
+ for (lptr = alpha_links_base; lptr; lptr = lptr->next)
+ if (strcmp (lptr->name, name) == 0)
+ {
+ if (is_local)
+ {
+ /* Defined here but external assumed. */
+ if (lptr->kind == KIND_EXTERN)
+ lptr->kind = KIND_LOCAL;
+ }
+ else
+ {
+ /* Used here but unused assumed. */
+ if (lptr->kind == KIND_UNUSED)
+ lptr->kind = KIND_LOCAL;
+ }
+ return;
+ }
+
+ nptr = (struct alpha_links *) xmalloc (sizeof (struct alpha_links));
+ nptr->next = alpha_links_base;
+ nptr->name = xstrdup (name);
+
+ /* Assume external if no definition. */
+ nptr->kind = (is_local ? KIND_UNUSED : KIND_EXTERN);
+
+ /* Ensure we have an IDENTIFIER so assemble_name can mark it as used. */
+ get_identifier (name);
+
+ alpha_links_base = nptr;
+
+ return;
+}
+
+
+void
+alpha_write_linkage (stream)
+ FILE *stream;
+{
+ struct alpha_links *lptr, *nptr;
+
+ readonly_section ();
+
+ fprintf (stream, "\t.align 3\n");
+
+ for (lptr = alpha_links_base; lptr; lptr = nptr)
+ {
+ nptr = lptr->next;
+
+ if (lptr->kind == KIND_UNUSED
+ || ! TREE_SYMBOL_REFERENCED (get_identifier (lptr->name)))
+ continue;
+
+ fprintf (stream, "$%s..lk:\n", lptr->name);
+ if (lptr->kind == KIND_LOCAL)
+ {
+ /* Local and used, build linkage pair. */
+ fprintf (stream, "\t.quad %s..en\n", lptr->name);
+ fprintf (stream, "\t.quad %s\n", lptr->name);
+ }
+ else
+ /* External and used, request linkage pair. */
+ fprintf (stream, "\t.linkage %s\n", lptr->name);
+ }
+}
+
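The linkage bookkeeping above is easiest to follow with the kind transitions isolated. The stand-alone sketch below (not part of the patch) re-implements just that logic: a name first seen at a call site is recorded as KIND_EXTERN, and a later definition in the same file promotes it to KIND_LOCAL, which is what makes alpha_write_linkage emit a local linkage pair instead of a .linkage request.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum links_kind { KIND_UNUSED, KIND_LOCAL, KIND_EXTERN };

    struct link_entry { struct link_entry *next; char *name; enum links_kind kind; };
    static struct link_entry *links_base;

    static void
    need_linkage (const char *name, int is_local)
    {
      struct link_entry *p;

      for (p = links_base; p; p = p->next)
        if (strcmp (p->name, name) == 0)
          {
            /* Same transitions as alpha_need_linkage.  */
            if (is_local ? p->kind == KIND_EXTERN : p->kind == KIND_UNUSED)
              p->kind = KIND_LOCAL;
            return;
          }

      p = malloc (sizeof *p);
      p->next = links_base;
      p->name = malloc (strlen (name) + 1);
      strcpy (p->name, name);
      p->kind = is_local ? KIND_UNUSED : KIND_EXTERN;
      links_base = p;
    }

    int
    main (void)
    {
      need_linkage ("bar", 0);   /* call site seen first: KIND_EXTERN */
      need_linkage ("bar", 1);   /* then the definition: promoted */
      printf ("bar is local: %d\n", links_base->kind == KIND_LOCAL);   /* 1 */
      return 0;
    }
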
+#else
+
+void
+alpha_need_linkage (name, is_local)
+ char *name ATTRIBUTE_UNUSED;
+ int is_local ATTRIBUTE_UNUSED;
+{
+}
+
+#endif /* OPEN_VMS */
diff --git a/contrib/gcc/config/alpha/alpha.h b/contrib/gcc/config/alpha/alpha.h
index 9504c09..43b0dee 100644
--- a/contrib/gcc/config/alpha/alpha.h
+++ b/contrib/gcc/config/alpha/alpha.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for DEC Alpha.
- Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
This file is part of GNU CC.
@@ -20,21 +20,21 @@ the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* Names to predefine in the preprocessor for this target machine. */
-
-#define CPP_PREDEFINES "\
--Dunix -D__osf__ -D__alpha -D__alpha__ -D_LONGLONG -DSYSTYPE_BSD \
--D_SYSTYPE_BSD -Asystem(unix) -Asystem(xpg4) -Acpu(alpha) -Amachine(alpha)"
-
/* Write out the correct language type definition for the header files.
Unless we have assembler language, write out the symbols for C. */
#define CPP_SPEC "\
-%{!.S: -D__LANGUAGE_C__ -D__LANGUAGE_C %{!ansi:-DLANGUAGE_C}} \
-%{.S: -D__LANGUAGE_ASSEMBLY__ -D__LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY}} \
-%{.cc: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
-%{.cxx: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
-%{.C: -D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus} \
-%{.m: -D__LANGUAGE_OBJECTIVE_C__ -D__LANGUAGE_OBJECTIVE_C}"
+%{!undef:\
+%{.S:-D__LANGUAGE_ASSEMBLY__ -D__LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY }}\
+%{.cc|.cxx|.C:-D__LANGUAGE_C_PLUS_PLUS__ -D__LANGUAGE_C_PLUS_PLUS -D__cplusplus }\
+%{.m:-D__LANGUAGE_OBJECTIVE_C__ -D__LANGUAGE_OBJECTIVE_C }\
+%{!.S:%{!.cc:%{!.cxx:%{!.C:%{!.m:-D__LANGUAGE_C__ -D__LANGUAGE_C %{!ansi:-DLANGUAGE_C }}}}}}\
+%{mieee:-D_IEEE_FP }\
+%{mieee-with-inexact:-D_IEEE_FP -D_IEEE_FP_INEXACT }}\
+%(cpp_cpu) %(cpp_subtarget)"
+
+#ifndef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC ""
+#endif
/* Set the spec to use for signed char. The default tests the above macro
but DEC's compiler can't handle the conditional in a "constant"
@@ -42,18 +42,6 @@ Boston, MA 02111-1307, USA. */
#define SIGNED_CHAR_SPEC "%{funsigned-char:-D__CHAR_UNSIGNED__}"
-/* Under OSF/1, -p and -pg require -lprof1. */
-
-#define LIB_SPEC "%{p:-lprof1} %{pg:-lprof1} %{a:-lprof2} -lc"
-
-/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
- optimizing, -O1 if we are not. Pass -shared, -non_shared or
- -call_shared as appropriate. Also pass -pg. */
-#define LINK_SPEC \
- "-G 8 %{O*:-O3} %{!O*:-O1} %{static:-non_shared} \
- %{!static:%{shared:-shared} %{!shared:-call_shared}} %{pg} %{taso} \
- %{rpath*}"
-
#define WORD_SWITCH_TAKES_ARG(STR) \
(!strcmp (STR, "rpath") || !strcmp (STR, "include") \
|| !strcmp (STR, "imacros") || !strcmp (STR, "aux-info") \
@@ -61,62 +49,296 @@ Boston, MA 02111-1307, USA. */
|| !strcmp (STR, "iwithprefix") || !strcmp (STR, "iwithprefixbefore") \
|| !strcmp (STR, "isystem"))
-#define STARTFILE_SPEC \
- "%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}}"
-
/* Print subsidiary information on the compiler version in use. */
#define TARGET_VERSION
-/* Default this to not be compiling for Windows/NT. */
-#ifndef WINDOWS_NT
-#define WINDOWS_NT 0
-#endif
-
-/* Define the location for the startup file on OSF/1 for Alpha. */
-
-#define MD_STARTFILE_PREFIX "/usr/lib/cmplrs/cc/"
-
/* Run-time compilation parameters selecting different hardware subsets. */
+/* Which processor to schedule for. The cpu attribute defines a list that
+ mirrors this list, so changes to alpha.md must be made at the same time. */
+
+enum processor_type
+ {PROCESSOR_EV4, /* 2106[46]{a,} */
+ PROCESSOR_EV5, /* 21164{a,pc,} */
+ PROCESSOR_EV6}; /* 21264 */
+
+extern enum processor_type alpha_cpu;
+
+enum alpha_trap_precision
+{
+ ALPHA_TP_PROG, /* No precision (default). */
+ ALPHA_TP_FUNC, /* Trap contained within originating function. */
+ ALPHA_TP_INSN /* Instruction accuracy and code is resumption safe. */
+};
+
+enum alpha_fp_rounding_mode
+{
+ ALPHA_FPRM_NORM, /* Normal rounding mode. */
+ ALPHA_FPRM_MINF, /* Round towards minus-infinity. */
+ ALPHA_FPRM_CHOP, /* Chopped rounding mode (towards 0). */
+ ALPHA_FPRM_DYN /* Dynamic rounding mode. */
+};
+
+enum alpha_fp_trap_mode
+{
+ ALPHA_FPTM_N, /* Normal trap mode. */
+ ALPHA_FPTM_U, /* Underflow traps enabled. */
+ ALPHA_FPTM_SU, /* Software completion, w/underflow traps */
+ ALPHA_FPTM_SUI /* Software completion, w/underflow & inexact traps */
+};
+
extern int target_flags;
+extern enum alpha_trap_precision alpha_tp;
+extern enum alpha_fp_rounding_mode alpha_fprm;
+extern enum alpha_fp_trap_mode alpha_fptm;
+
/* This means that floating-point support exists in the target implementation
of the Alpha architecture. This is usually the default. */
-#define TARGET_FP (target_flags & 1)
+#define MASK_FP 1
+#define TARGET_FP (target_flags & MASK_FP)
/* This means that floating-point registers are allowed to be used. Note
that Alpha implementations without FP operations are required to
provide the FP registers. */
-#define TARGET_FPREGS (target_flags & 2)
+#define MASK_FPREGS 2
+#define TARGET_FPREGS (target_flags & MASK_FPREGS)
/* This means that gas is used to process the assembler file. */
#define MASK_GAS 4
#define TARGET_GAS (target_flags & MASK_GAS)
+/* This means that we should mark procedures as IEEE conformant. */
+
+#define MASK_IEEE_CONFORMANT 8
+#define TARGET_IEEE_CONFORMANT (target_flags & MASK_IEEE_CONFORMANT)
+
+/* This means we should be IEEE-compliant except for inexact. */
+
+#define MASK_IEEE 16
+#define TARGET_IEEE (target_flags & MASK_IEEE)
+
+/* This means we should be fully IEEE-compliant. */
+
+#define MASK_IEEE_WITH_INEXACT 32
+#define TARGET_IEEE_WITH_INEXACT (target_flags & MASK_IEEE_WITH_INEXACT)
+
+/* This means we must construct all constants rather than emitting
+ them as literal data. */
+
+#define MASK_BUILD_CONSTANTS 128
+#define TARGET_BUILD_CONSTANTS (target_flags & MASK_BUILD_CONSTANTS)
+
+/* This means we handle floating-point values in VAX F-format (float)
+ or G-format (double). */
+
+#define MASK_FLOAT_VAX 512
+#define TARGET_FLOAT_VAX (target_flags & MASK_FLOAT_VAX)
+
+/* This means that the processor has byte and half word loads and stores
+ (the BWX extension). */
+
+#define MASK_BWX 1024
+#define TARGET_BWX (target_flags & MASK_BWX)
+
+/* This means that the processor has the CIX extension. */
+#define MASK_CIX 2048
+#define TARGET_CIX (target_flags & MASK_CIX)
+
+/* This means that the processor has the MAX extension. */
+#define MASK_MAX 4096
+#define TARGET_MAX (target_flags & MASK_MAX)
+
+/* This means that the processor is an EV5, EV56, or PCA56. This is defined
+ only in TARGET_CPU_DEFAULT. */
+#define MASK_CPU_EV5 8192
+
+/* Likewise for EV6. */
+#define MASK_CPU_EV6 16384
+
+/* This means we support the .arch directive in the assembler. Only
+ defined in TARGET_CPU_DEFAULT. */
+#define MASK_SUPPORT_ARCH 32768
+#define TARGET_SUPPORT_ARCH (target_flags & MASK_SUPPORT_ARCH)
+
+/* These are for target OS support and cannot be changed at runtime. */
+#ifndef TARGET_WINDOWS_NT
+#define TARGET_WINDOWS_NT 0
+#endif
+#ifndef TARGET_OPEN_VMS
+#define TARGET_OPEN_VMS 0
+#endif
+
+#ifndef TARGET_AS_CAN_SUBTRACT_LABELS
+#define TARGET_AS_CAN_SUBTRACT_LABELS TARGET_GAS
+#endif
+#ifndef TARGET_CAN_FAULT_IN_PROLOGUE
+#define TARGET_CAN_FAULT_IN_PROLOGUE 0
+#endif
+
/* Macro to define tables used to set the flags.
This is a list in braces of pairs in braces,
each pair being { "NAME", VALUE }
where VALUE is the bits to set or minus the bits to clear.
An empty string NAME is used to identify the default VALUE. */
-#define TARGET_SWITCHES \
- { {"no-soft-float", 1}, \
- {"soft-float", -1}, \
- {"fp-regs", 2}, \
- {"no-fp-regs", -3}, \
- {"alpha-as", -MASK_GAS}, \
- {"gas", MASK_GAS}, \
+#define TARGET_SWITCHES \
+ { {"no-soft-float", MASK_FP}, \
+ {"soft-float", - MASK_FP}, \
+ {"fp-regs", MASK_FPREGS}, \
+ {"no-fp-regs", - (MASK_FP|MASK_FPREGS)}, \
+ {"alpha-as", -MASK_GAS}, \
+ {"gas", MASK_GAS}, \
+ {"ieee-conformant", MASK_IEEE_CONFORMANT}, \
+ {"ieee", MASK_IEEE|MASK_IEEE_CONFORMANT}, \
+ {"ieee-with-inexact", MASK_IEEE_WITH_INEXACT|MASK_IEEE_CONFORMANT}, \
+ {"build-constants", MASK_BUILD_CONSTANTS}, \
+ {"float-vax", MASK_FLOAT_VAX}, \
+ {"float-ieee", -MASK_FLOAT_VAX}, \
+ {"bwx", MASK_BWX}, \
+ {"no-bwx", -MASK_BWX}, \
+ {"cix", MASK_CIX}, \
+ {"no-cix", -MASK_CIX}, \
+ {"max", MASK_MAX}, \
+ {"no-max", -MASK_MAX}, \
{"", TARGET_DEFAULT | TARGET_CPU_DEFAULT} }
-#define TARGET_DEFAULT 3
+#define TARGET_DEFAULT MASK_FP|MASK_FPREGS
#ifndef TARGET_CPU_DEFAULT
#define TARGET_CPU_DEFAULT 0
#endif
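
As an illustration (not part of the patch) of how the mask definitions and the TARGET_SWITCHES table combine: each -m switch ORs in (positive entry) or clears (negative entry) bits of target_flags, and the TARGET_FP/TARGET_IEEE style macros simply test those bits. The mask values in this minimal sketch are the ones defined earlier in this header.

    #include <stdio.h>

    #define MASK_FP              1
    #define MASK_FPREGS          2
    #define MASK_IEEE_CONFORMANT 8
    #define MASK_IEEE            16

    int
    main (void)
    {
      int target_flags = MASK_FP | MASK_FPREGS;           /* TARGET_DEFAULT */

      target_flags |= MASK_IEEE | MASK_IEEE_CONFORMANT;   /* -mieee */
      target_flags &= ~MASK_FP;                           /* -msoft-float */

      printf ("TARGET_FP=%d TARGET_IEEE=%d\n",
              (target_flags & MASK_FP) != 0,
              (target_flags & MASK_IEEE) != 0);           /* prints 0 and 1 */
      return 0;
    }
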
+/* This macro is similar to `TARGET_SWITCHES' but defines names of
+ command options that have values. Its definition is an initializer
+ with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the fixed
+ part of the option name, and the address of a variable. The
+ variable, type `char *', is set to the variable part of the given
+ option if the fixed part matches. The actual option name is made
+ by appending `-m' to the specified name.
+
+ Here is an example which defines `-mshort-data-NUMBER'. If the
+ given option is `-mshort-data-512', the variable `m88k_short_data'
+ will be set to the string `"512"'.
+
+ extern char *m88k_short_data;
+ #define TARGET_OPTIONS { { "short-data-", &m88k_short_data } } */
+
+extern char *alpha_cpu_string; /* For -mcpu= */
+extern char *alpha_fprm_string; /* For -mfp-rounding-mode=[n|m|c|d] */
+extern char *alpha_fptm_string; /* For -mfp-trap-mode=[n|u|su|sui] */
+extern char *alpha_tp_string; /* For -mtrap-precision=[p|f|i] */
+extern char *alpha_mlat_string; /* For -mmemory-latency= */
+
+#define TARGET_OPTIONS \
+{ \
+ {"cpu=", &alpha_cpu_string}, \
+ {"fp-rounding-mode=", &alpha_fprm_string}, \
+ {"fp-trap-mode=", &alpha_fptm_string}, \
+ {"trap-precision=", &alpha_tp_string}, \
+ {"memory-latency=", &alpha_mlat_string}, \
+}
+
+/* Attempt to describe CPU characteristics to the preprocessor. */
+
+/* Corresponding to amask... */
+#define CPP_AM_BWX_SPEC "-D__alpha_bwx__ -Acpu(bwx)"
+#define CPP_AM_MAX_SPEC "-D__alpha_max__ -Acpu(max)"
+#define CPP_AM_CIX_SPEC "-D__alpha_cix__ -Acpu(cix)"
+
+/* Corresponding to implver... */
+#define CPP_IM_EV4_SPEC "-D__alpha_ev4__ -Acpu(ev4)"
+#define CPP_IM_EV5_SPEC "-D__alpha_ev5__ -Acpu(ev5)"
+#define CPP_IM_EV6_SPEC "-D__alpha_ev6__ -Acpu(ev6)"
+
+/* Common combinations. */
+#define CPP_CPU_EV4_SPEC "%(cpp_im_ev4)"
+#define CPP_CPU_EV5_SPEC "%(cpp_im_ev5)"
+#define CPP_CPU_EV56_SPEC "%(cpp_im_ev5) %(cpp_am_bwx)"
+#define CPP_CPU_PCA56_SPEC "%(cpp_im_ev5) %(cpp_am_bwx) %(cpp_am_max)"
+#define CPP_CPU_EV6_SPEC "%(cpp_im_ev6) %(cpp_am_bwx) %(cpp_am_max) %(cpp_am_cix)"
+
+#ifndef CPP_CPU_DEFAULT_SPEC
+# if TARGET_CPU_DEFAULT & MASK_CPU_EV6
+# define CPP_CPU_DEFAULT_SPEC CPP_CPU_EV6_SPEC
+# else
+# if TARGET_CPU_DEFAULT & MASK_CPU_EV5
+# if TARGET_CPU_DEFAULT & MASK_MAX
+# define CPP_CPU_DEFAULT_SPEC CPP_CPU_PCA56_SPEC
+# else
+# if TARGET_CPU_DEFAULT & MASK_BWX
+# define CPP_CPU_DEFAULT_SPEC CPP_CPU_EV56_SPEC
+# else
+# define CPP_CPU_DEFAULT_SPEC CPP_CPU_EV5_SPEC
+# endif
+# endif
+# else
+# define CPP_CPU_DEFAULT_SPEC CPP_CPU_EV4_SPEC
+# endif
+# endif
+#endif /* CPP_CPU_DEFAULT_SPEC */
+
+#ifndef CPP_CPU_SPEC
+#define CPP_CPU_SPEC "\
+%{!undef:-Acpu(alpha) -Amachine(alpha) -D__alpha -D__alpha__ \
+%{mcpu=ev4|mcpu=21064:%(cpp_cpu_ev4) }\
+%{mcpu=ev5|mcpu=21164:%(cpp_cpu_ev5) }\
+%{mcpu=ev56|mcpu=21164a:%(cpp_cpu_ev56) }\
+%{mcpu=pca56|mcpu=21164pc|mcpu=21164PC:%(cpp_cpu_pca56) }\
+%{mcpu=ev6|mcpu=21264:%(cpp_cpu_ev6) }\
+%{!mcpu*:%(cpp_cpu_default) }}"
+#endif
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define EXTRA_SPECS \
+ { "cpp_am_bwx", CPP_AM_BWX_SPEC }, \
+ { "cpp_am_max", CPP_AM_MAX_SPEC }, \
+ { "cpp_am_cix", CPP_AM_CIX_SPEC }, \
+ { "cpp_im_ev4", CPP_IM_EV4_SPEC }, \
+ { "cpp_im_ev5", CPP_IM_EV5_SPEC }, \
+ { "cpp_im_ev6", CPP_IM_EV6_SPEC }, \
+ { "cpp_cpu_ev4", CPP_CPU_EV4_SPEC }, \
+ { "cpp_cpu_ev5", CPP_CPU_EV5_SPEC }, \
+ { "cpp_cpu_ev56", CPP_CPU_EV56_SPEC }, \
+ { "cpp_cpu_pca56", CPP_CPU_PCA56_SPEC }, \
+ { "cpp_cpu_ev6", CPP_CPU_EV6_SPEC }, \
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC }, \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cpp_subtarget", CPP_SUBTARGET_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+
+/* Sometimes certain combinations of command options do not make sense
+ on a particular target machine. You can define a macro
+ `OVERRIDE_OPTIONS' to take account of this. This macro, if
+ defined, is executed once just after all the command options have
+ been parsed.
+
+ On the Alpha, it is used to translate target-option strings into
+ numeric values. */
+
+extern void override_options ();
+#define OVERRIDE_OPTIONS override_options ()
+
+
/* Define this macro to change register usage conditional on target flags.
On the Alpha, we use this to disable the floating-point registers when
@@ -135,6 +357,23 @@ extern int target_flags;
/* Define to enable software floating point emulation. */
#define REAL_ARITHMETIC
+/* The following #defines are used when compiling the routines in
+ libgcc1.c. Since the Alpha calling conventions require single
+ precision floats to be passed in the floating-point registers
+ (rather than in the general registers) we have to build the
+ libgcc1.c routines in such a way that they know the actual types
+ of their formal arguments and the actual types of their return
+ values. Otherwise, gcc will generate calls to the libgcc1.c
+ routines, passing arguments in the floating-point registers,
+ but the libgcc1.c routines will expect their arguments on the
+ stack (where the Alpha calling conventions require structs &
+ unions to be passed). */
+
+#define FLOAT_VALUE_TYPE double
+#define INTIFY(FLOATVAL) (FLOATVAL)
+#define FLOATIFY(INTVAL) (INTVAL)
+#define FLOAT_ARG_TYPE double
+
/* Define the size of `int'. The default is the same as the word size. */
#define INT_TYPE_SIZE 32
@@ -149,8 +388,8 @@ extern int target_flags;
#define DOUBLE_TYPE_SIZE 64
#define LONG_DOUBLE_TYPE_SIZE 64
-#define WCHAR_TYPE "short unsigned int"
-#define WCHAR_TYPE_SIZE 16
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE 32
/* Define this macro if it is advisable to hold scalars in registers
in a wider mode than that declared by the program. In such cases,
@@ -220,7 +459,7 @@ extern int target_flags;
#define STACK_BOUNDARY 64
/* Allocation boundary (in *bits*) for the code of a function. */
-#define FUNCTION_BOUNDARY 64
+#define FUNCTION_BOUNDARY 256
/* Alignment of field after `int : 0' in a structure. */
#define EMPTY_FIELD_BOUNDARY 64
@@ -237,31 +476,31 @@ extern int target_flags;
we don't optimize and also if we are writing ECOFF symbols to work around
a bug in DEC's assembler. */
-#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
- if (optimize > 0 && write_symbols != SDB_DEBUG) \
- ASM_OUTPUT_ALIGN (FILE, 5)
+#define LOOP_ALIGN(LABEL) \
+ (optimize > 0 && write_symbols != SDB_DEBUG ? 4 : 0)
-/* This is how to align an instruction for optimal branching.
- On Alpha we'll get better performance by aligning on a quadword
+/* This is how to align an instruction for optimal branching. On
+ Alpha we'll get better performance by aligning on an octaword
boundary. */
-#define ASM_OUTPUT_ALIGN_CODE(FILE) \
- if (optimize > 0 && write_symbols != SDB_DEBUG) \
- ASM_OUTPUT_ALIGN ((FILE), 4)
+#define ALIGN_LABEL_AFTER_BARRIER(FILE) \
+ (optimize > 0 && write_symbols != SDB_DEBUG ? 4 : 0)
/* No data type wants to be aligned rounder than this. */
#define BIGGEST_ALIGNMENT 64
-/* Make strings word-aligned so strcpy from constants will be faster. */
-#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
- (TREE_CODE (EXP) == STRING_CST \
- && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
-
-/* Make arrays of chars word-aligned for the same reasons. */
-#define DATA_ALIGNMENT(TYPE, ALIGN) \
- (TREE_CODE (TYPE) == ARRAY_TYPE \
- && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
- && (ALIGN) < BITS_PER_WORD ? BITS_PER_WORD : (ALIGN))
+/* For atomic access, objects must have at least 32-bit alignment
+ unless the machine has byte operations. */
+#define MINIMUM_ATOMIC_ALIGNMENT (TARGET_BWX ? 8 : 32)
+
+/* Align all constants and variables to at least a word boundary so
+ we can pick up pieces of them faster. */
+/* ??? Only if block-move stuff knows about different source/destination
+ alignment. */
+#if 0
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD)
+#define DATA_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD)
+#endif
/* Set this non-zero if move instructions will actually fail to work
when given unaligned data.
@@ -325,11 +564,11 @@ extern int target_flags;
listed once, even those in FIXED_REGISTERS.
We allocate in the following order:
- $f1 (nonsaved floating-point register)
- $f10-$f15 (likewise)
+ $f10-$f15 (nonsaved floating-point register)
$f22-$f30 (likewise)
$f21-$f16 (likewise, but input args)
$f0 (nonsaved, but return value)
+ $f1 (nonsaved, but immediate before saved)
$f2-$f9 (saved floating-point registers)
$1-$8 (nonsaved integer registers)
$22-$25 (likewise)
@@ -344,11 +583,10 @@ extern int target_flags;
$30, $31, $f31 (stack pointer and always zero/ap & fp) */
#define REG_ALLOC_ORDER \
- {33, \
- 42, 43, 44, 45, 46, 47, \
+ {42, 43, 44, 45, 46, 47, \
54, 55, 56, 57, 58, 59, 60, 61, 62, \
53, 52, 51, 50, 49, 48, \
- 32, \
+ 32, 33, \
34, 35, 36, 37, 38, 39, 40, 41, \
1, 2, 3, 4, 5, 6, 7, 8, \
22, 23, 24, 25, \
@@ -373,18 +611,20 @@ extern int target_flags;
/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
On Alpha, the integer registers can hold any mode. The floating-point
registers can hold 32-bit and 64-bit integers as well, but not 16-bit
- or 8-bit values. If we only allowed the larger integers into FP registers,
- we'd have to say that QImode and SImode aren't tiable, which is a
- pain. So say all registers can hold everything and see how that works. */
+ or 8-bit values. */
-#define HARD_REGNO_MODE_OK(REGNO, MODE) 1
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((REGNO) < 32 || ((MODE) != QImode && (MODE) != HImode))
/* Value is 1 if it is a good idea to tie two pseudo registers
when one has mode MODE1 and one has mode MODE2.
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
for any hard reg, then this must be 0 for correct output. */
-#define MODES_TIEABLE_P(MODE1, MODE2) 1
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == QImode || (MODE1) == HImode \
+ ? (MODE2) == QImode || (MODE2) == HImode \
+ : 1)
/* Specify the registers used for certain standard purposes.
The values of these macros are register numbers. */
@@ -499,9 +739,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
: (C) == 'J' ? (VALUE) == 0 \
: (C) == 'K' ? (unsigned HOST_WIDE_INT) ((VALUE) + 0x8000) < 0x10000 \
: (C) == 'L' ? (((VALUE) & 0xffff) == 0 \
- && (((VALUE)) >> 31 == -1 || (VALUE) >> 31 == 0) \
- && ((HOST_BITS_PER_WIDE_INT == 64 \
- || (unsigned) (VALUE) != 0x80000000U))) \
+ && (((VALUE)) >> 31 == -1 || (VALUE) >> 31 == 0)) \
: (C) == 'M' ? zap_mask (VALUE) \
: (C) == 'N' ? (unsigned HOST_WIDE_INT) (~ (VALUE)) < 0x100 \
: (C) == 'O' ? (unsigned HOST_WIDE_INT) (- (VALUE)) < 0x100 \
@@ -526,12 +764,17 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
For the Alpha, `Q' means that this is a memory operand but not a
reference to an unaligned location.
+
`R' is a SYMBOL_REF that has SYMBOL_REF_FLAG set or is the current
- function. */
+ function.
+
+ 'S' is a 6-bit constant (valid for a shift insn). */
#define EXTRA_CONSTRAINT(OP, C) \
- ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) != AND \
- : (C) == 'R' ? current_file_function_operand (OP, Pmode) \
+ ((C) == 'Q' ? GET_CODE (OP) == MEM && GET_CODE (XEXP (OP, 0)) != AND \
+ : (C) == 'R' ? current_file_function_operand (OP, Pmode) \
+ : (C) == 'S' ? (GET_CODE (OP) == CONST_INT \
+ && (unsigned HOST_WIDE_INT) INTVAL (OP) < 64) \
: 0)
/* Given an rtx X being reloaded into a reg required to be
@@ -544,12 +787,13 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
#define PREFERRED_RELOAD_CLASS(X, CLASS) \
(CONSTANT_P (X) && (X) != const0_rtx && (X) != CONST0_RTX (GET_MODE (X)) \
- ? ((CLASS) == FLOAT_REGS ? NO_REGS : GENERAL_REGS) \
+ ? ((CLASS) == FLOAT_REGS || (CLASS) == NO_REGS ? NO_REGS : GENERAL_REGS)\
: (CLASS))
/* Loading and storing HImode or QImode values to and from memory
usually requires a scratch register. The exceptions are loading
- QImode and HImode from an aligned address to a general register.
+ QImode and HImode from an aligned address to a general register
+ unless byte instructions are permitted.
We also cannot load an unaligned address or a paradoxical SUBREG into an
FP register. */
@@ -563,7 +807,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
&& (((CLASS) == FLOAT_REGS \
&& ((MODE) == SImode || (MODE) == HImode || (MODE) == QImode)) \
|| (((MODE) == QImode || (MODE) == HImode) \
- && unaligned_memory_operand (IN, MODE)))) \
+ && ! TARGET_BWX && unaligned_memory_operand (IN, MODE)))) \
? GENERAL_REGS \
: ((CLASS) == FLOAT_REGS && GET_CODE (IN) == MEM \
&& GET_CODE (XEXP (IN, 0)) == AND) ? GENERAL_REGS \
@@ -579,8 +823,9 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
&& (GET_CODE (SUBREG_REG (OUT)) == MEM \
|| (GET_CODE (SUBREG_REG (OUT)) == REG \
&& REGNO (SUBREG_REG (OUT)) >= FIRST_PSEUDO_REGISTER)))) \
- && (((MODE) == HImode || (MODE) == QImode \
- || ((MODE) == SImode && (CLASS) == FLOAT_REGS)))) \
+ && ((((MODE) == HImode || (MODE) == QImode) \
+ && (! TARGET_BWX || (CLASS) == FLOAT_REGS)) \
+ || ((MODE) == SImode && (CLASS) == FLOAT_REGS))) \
? GENERAL_REGS \
: ((CLASS) == FLOAT_REGS && GET_CODE (OUT) == MEM \
&& GET_CODE (XEXP (OUT, 0)) == AND) ? GENERAL_REGS \
@@ -590,9 +835,10 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
: NO_REGS)
/* If we are copying between general and FP registers, we need a memory
- location. */
+ location unless the CIX extension is available. */
-#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) ((CLASS1) != (CLASS2))
+#define SECONDARY_MEMORY_NEEDED(CLASS1,CLASS2,MODE) \
+ (! TARGET_CIX && (CLASS1) != (CLASS2))
/* Specify the mode to be used for memory when a secondary memory
location is needed. If MODE is floating-point, use it. Otherwise,
@@ -622,15 +868,18 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
reduce the impact of not being able to allocate a pseudo to a
hard register. */
-#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
- (((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) ? 2 : 20)
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((CLASS1) == FLOAT_REGS) == ((CLASS2) == FLOAT_REGS) \
+ ? 2 \
+ : TARGET_CIX ? 3 : 4+2*alpha_memory_latency)
 /* A C expression returning the cost of moving data of MODE from a register to
or from memory.
On the Alpha, bump this up a bit. */
-#define MEMORY_MOVE_COST(MODE) 6
+extern int alpha_memory_latency;
+#define MEMORY_MOVE_COST(MODE,CLASS,IN) (2*alpha_memory_latency)
/* Provide the cost of a branch. Exact meaning under development. */
#define BRANCH_COST 5
@@ -664,6 +913,9 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
On Alpha, don't define this because there are no push insns. */
/* #define PUSH_ROUNDING(BYTES) */
+/* Define this to be nonzero if stack checking is built into the ABI. */
+#define STACK_CHECK_BUILTIN 1
+
/* Define this if the maximum size of all the outgoing args is to be
accumulated and pushed during the prologue. The amount can be
found in the variable current_function_outgoing_args_size. */
@@ -738,18 +990,25 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
$f0 for floating-point functions. */
#define FUNCTION_VALUE(VALTYPE, FUNC) \
- gen_rtx (REG, \
- (INTEGRAL_MODE_P (TYPE_MODE (VALTYPE)) \
- && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
- ? word_mode : TYPE_MODE (VALTYPE), \
- TARGET_FPREGS && TREE_CODE (VALTYPE) == REAL_TYPE ? 32 : 0)
+ gen_rtx (REG, \
+ ((INTEGRAL_TYPE_P (VALTYPE) \
+ && TYPE_PRECISION (VALTYPE) < BITS_PER_WORD) \
+ || POINTER_TYPE_P (VALTYPE)) \
+ ? word_mode : TYPE_MODE (VALTYPE), \
+ ((TARGET_FPREGS \
+ && (TREE_CODE (VALTYPE) == REAL_TYPE \
+ || TREE_CODE (VALTYPE) == COMPLEX_TYPE)) \
+ ? 32 : 0))
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
#define LIBCALL_VALUE(MODE) \
- gen_rtx (REG, MODE, \
- TARGET_FPREGS && GET_MODE_CLASS (MODE) == MODE_FLOAT ? 32 : 0)
+ gen_rtx (REG, MODE, \
+ (TARGET_FPREGS \
+ && (GET_MODE_CLASS (MODE) == MODE_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT) \
+ ? 32 : 0))
/* The definition of this macro implies that there are cases where
a scalar value cannot be returned in registers.
@@ -764,7 +1023,8 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
/* 1 if N is a possible register number for a function value
as seen by the caller. */
-#define FUNCTION_VALUE_REGNO_P(N) ((N) == 0 || (N) == 32)
+#define FUNCTION_VALUE_REGNO_P(N) \
+ ((N) == 0 || (N) == 1 || (N) == 32 || (N) == 33)
/* 1 if N is a possible register number for function argument passing.
On Alpha, these are $16-$21 and $f16-$f21. */
@@ -788,7 +1048,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
-#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) (CUM) = 0
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) (CUM) = 0
/* Define intermediate macro to compute the size (in registers) of an argument
for the Alpha. */
@@ -894,6 +1154,7 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
plus_constant (virtual_incoming_args_rtx, \
(CUM) * UNITS_PER_WORD)), \
6 - (CUM), (6 - (CUM)) * UNITS_PER_WORD); \
+ emit_insn (gen_blockage ()); \
} \
PRETEND_SIZE = 12 * UNITS_PER_WORD; \
} \
@@ -905,6 +1166,9 @@ enum reg_class { NO_REGS, GENERAL_REGS, FLOAT_REGS, ALL_REGS,
emitted. If it would take more than N insns, zero is returned and no
 insns are emitted. */
extern struct rtx_def *alpha_emit_set_const ();
+extern struct rtx_def *alpha_emit_set_long_const ();
+extern struct rtx_def *alpha_emit_conditional_branch ();
+extern struct rtx_def *alpha_emit_conditional_move ();
/* Generate necessary RTL for __builtin_saveregs().
ARGLIST is the argument list; see expr.c. */
@@ -918,25 +1182,34 @@ extern struct rtx_def *alpha_builtin_saveregs ();
extern struct rtx_def *alpha_compare_op0, *alpha_compare_op1;
extern int alpha_compare_fp_p;
-/* This macro produces the initial definition of a function name. On the
- Alpha, we need to save the function name for the prologue and epilogue. */
+/* Make (or fake) .linkage entry for function call.
+ IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */
+extern void alpha_need_linkage ();
-extern char *alpha_function_name;
+/* This macro defines the start of an assembly comment. */
-#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
-{ \
- alpha_function_name = NAME; \
-}
+#define ASM_COMMENT_START " #"
+
+/* This macro produces the initial definition of a function. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE,NAME,DECL) \
+ alpha_start_function(FILE,NAME,DECL);
+extern void alpha_start_function ();
+
+/* This macro closes up a function definition for the assembler. */
+
+#define ASM_DECLARE_FUNCTION_SIZE(FILE,NAME,DECL) \
+ alpha_end_function(FILE,NAME,DECL)
+extern void alpha_end_function ();
-/* This macro generates the assembly code for function entry.
- FILE is a stdio stream to output the code to.
- SIZE is an int: how many units of temporary storage to allocate.
- Refer to the array `regs_ever_live' to determine which registers
- to save; `regs_ever_live[I]' is nonzero if register number I
- is ever used in the function. This macro is responsible for
- knowing which registers should not be saved even if used. */
+/* This macro notes the end of the prologue. */
-#define FUNCTION_PROLOGUE(FILE, SIZE) output_prolog (FILE, SIZE)
+#define FUNCTION_END_PROLOGUE(FILE) output_end_prologue (FILE)
+extern void output_end_prologue ();
+
+/* Output any profiling code before the prologue. */
+
+#define PROFILE_BEFORE_PROLOGUE 1
/* Output assembler code to FILE to increment profiler label # LABELNO
for profiling a function entry. Under OSF/1, profiling is enabled
@@ -986,19 +1259,6 @@ extern char *alpha_function_name;
No definition is equivalent to always zero. */
#define EXIT_IGNORE_STACK 1
-
-/* This macro generates the assembly code for function exit,
- on machines that need it. If FUNCTION_EPILOGUE is not defined
- then individual return instructions are generated for each
- return statement. Args are same as for FUNCTION_PROLOGUE.
-
- The function epilogue should not depend on the current stack pointer!
- It should use the frame pointer only. This is mandatory because
- of alloca; we also take advantage of it to omit stack adjustments
- before returning. */
-
-#define FUNCTION_EPILOGUE(FILE, SIZE) output_epilog (FILE, SIZE)
-
/* Output assembler code for a block containing the constant parts
of a trampoline, leaving space for the variable parts.
@@ -1010,13 +1270,13 @@ extern char *alpha_function_name;
aligned to FUNCTION_BOUNDARY, which is 64 bits. */
#define TRAMPOLINE_TEMPLATE(FILE) \
-{ \
+do { \
fprintf (FILE, "\tldq $1,24($27)\n"); \
fprintf (FILE, "\tldq $27,16($27)\n"); \
fprintf (FILE, "\tjmp $31,($27),0\n"); \
fprintf (FILE, "\tnop\n"); \
fprintf (FILE, "\t.quad 0,0\n"); \
-}
+} while (0)
/* Section in which to place the trampoline. On Alpha, instructions
may only be placed in a text segment. */
@@ -1029,76 +1289,24 @@ extern char *alpha_function_name;
/* Emit RTL insns to initialize the variable parts of a trampoline.
FNADDR is an RTX for the address of the function's pure code.
- CXT is an RTX for the static chain value for the function. We assume
- here that a function will be called many more times than its address
- is taken (e.g., it might be passed to qsort), so we take the trouble
- to initialize the "hint" field in the JMP insn. Note that the hint
- field is PC (new) + 4 * bits 13:0. */
-
-#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
-{ \
- rtx _temp, _temp1, _addr; \
- \
- _addr = memory_address (Pmode, plus_constant ((TRAMP), 16)); \
- emit_move_insn (gen_rtx (MEM, Pmode, _addr), (FNADDR)); \
- _addr = memory_address (Pmode, plus_constant ((TRAMP), 24)); \
- emit_move_insn (gen_rtx (MEM, Pmode, _addr), (CXT)); \
- \
- _temp = force_operand (plus_constant ((TRAMP), 12), NULL_RTX); \
- _temp = expand_binop (DImode, sub_optab, (FNADDR), _temp, _temp, 1, \
- OPTAB_WIDEN); \
- _temp = expand_shift (RSHIFT_EXPR, Pmode, _temp, \
- build_int_2 (2, 0), NULL_RTX, 1); \
- _temp = expand_and (gen_lowpart (SImode, _temp), \
- GEN_INT (0x3fff), 0); \
- \
- _addr = memory_address (SImode, plus_constant ((TRAMP), 8)); \
- _temp1 = force_reg (SImode, gen_rtx (MEM, SImode, _addr)); \
- _temp1 = expand_and (_temp1, GEN_INT (0xffffc000), NULL_RTX); \
- _temp1 = expand_binop (SImode, ior_optab, _temp1, _temp, _temp1, 1, \
- OPTAB_WIDEN); \
- \
- emit_move_insn (gen_rtx (MEM, SImode, _addr), _temp1); \
- \
- emit_library_call (gen_rtx (SYMBOL_REF, Pmode, \
- "__enable_execute_stack"), \
- 0, VOIDmode, 1,_addr, Pmode); \
- \
- emit_insn (gen_rtx (UNSPEC_VOLATILE, VOIDmode, \
- gen_rtvec (1, const0_rtx), 0)); \
-}
+ CXT is an RTX for the static chain value for the function. */
-/* Attempt to turn on access permissions for the stack. */
-
-#define TRANSFER_FROM_TRAMPOLINE \
- \
-void \
-__enable_execute_stack (addr) \
- void *addr; \
-{ \
- long size = getpagesize (); \
- long mask = ~(size-1); \
- char *page = (char *) (((long) addr) & mask); \
- char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
- \
- /* 7 is PROT_READ | PROT_WRITE | PROT_EXEC */ \
- if (mprotect (page, end - page, 7) < 0) \
- perror ("mprotect of trampoline code"); \
-}
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 16, 24, 8)
/* A C expression whose value is RTL representing the value of the return
address for the frame COUNT steps up from the current frame.
FRAMEADDR is the frame pointer of the COUNT frame, or the frame pointer of
- the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME} is defined.
+ the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME is defined. */
+
+#define RETURN_ADDR_RTX alpha_return_addr
+extern struct rtx_def *alpha_return_addr ();
- This definition for Alpha is broken, but is put in at the request of
- Mike Stump. */
+/* Initialize data used by insn expanders. This is called from init_emit,
+ once for every function before code is generated. */
-#define RETURN_ADDR_RTX(COUNT, FRAME) \
-((COUNT == 0 && alpha_sa_size () == 0 && 0 /* not right. */) \
- ? gen_rtx (REG, Pmode, 26) \
- : gen_rtx (MEM, Pmode, \
- memory_address (Pmode, FRAME)))
+#define INIT_EXPANDERS alpha_init_expanders ()
+extern void alpha_init_expanders ();
/* Addressing modes, and classification of registers for them. */
@@ -1284,6 +1492,58 @@ __enable_execute_stack (addr) \
} \
}
+/* Try a machine-dependent way of reloading an illegitimate address
+ operand. If we find one, push the reload and jump to WIN. This
+ macro is used in only one place: `find_reloads_address' in reload.c.
+
+ For the Alpha, we wish to handle large displacements off a base
+ register by splitting the addend across an ldah and the mem insn.
+ This cuts number of extra insns needed from 3 to 1. */
+
+#define LEGITIMIZE_RELOAD_ADDRESS(X,MODE,OPNUM,TYPE,IND_LEVELS,WIN) \
+do { \
+ /* We must recognize output that we have already generated ourselves. */ \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == PLUS \
+ && GET_CODE (XEXP (XEXP (X, 0), 0)) == REG \
+ && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+ if (GET_CODE (X) == PLUS \
+ && GET_CODE (XEXP (X, 0)) == REG \
+ && REGNO (XEXP (X, 0)) < FIRST_PSEUDO_REGISTER \
+ && REG_MODE_OK_FOR_BASE_P (XEXP (X, 0), MODE) \
+ && GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ HOST_WIDE_INT val = INTVAL (XEXP (X, 1)); \
+ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000; \
+ HOST_WIDE_INT high \
+ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000; \
+ \
+ /* Check for 32-bit overflow. */ \
+ if (high + low != val) \
+ break; \
+ \
+ /* Reload the high part into a base reg; leave the low part \
+ in the mem directly. */ \
+ \
+ X = gen_rtx_PLUS (GET_MODE (X), \
+ gen_rtx_PLUS (GET_MODE (X), XEXP (X, 0), \
+ GEN_INT (high)), \
+ GEN_INT (low)); \
+ \
+ push_reload (XEXP (X, 0), NULL_RTX, &XEXP (X, 0), NULL_PTR, \
+ BASE_REG_CLASS, GET_MODE (X), VOIDmode, 0, 0, \
+ OPNUM, TYPE); \
+ goto WIN; \
+ } \
+} while (0)
+
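The high/low arithmetic in LEGITIMIZE_RELOAD_ADDRESS is worth seeing with numbers. The minimal sketch below (not part of the patch) applies the same expressions: the low part is the sign-extended bottom 16 bits (it becomes the memory insn's displacement), the high part is what the ldah supplies, and high + low must give back the original offset or the 32-bit range was exceeded.

    #include <stdio.h>

    int
    main (void)
    {
      long long vals[] = { 0x12345678LL, 0x1234abcdLL };
      int i;

      for (i = 0; i < 2; i++)
        {
          long long val  = vals[i];
          long long low  = ((val & 0xffff) ^ 0x8000) - 0x8000;
          long long high = (((val - low) & 0xffffffffLL) ^ 0x80000000LL)
                           - 0x80000000LL;

          /* First value: high=0x12340000, low=22136; second: the low half
             is >= 0x8000, so low goes negative and high rounds up.  */
          printf ("val=%#llx high=%#llx low=%lld ok=%d\n",
                  val, high, low, high + low == val);
        }
      return 0;
    }
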
/* Go to LABEL if ADDR (a legitimate address expression)
has an effect that depends on the machine mode it is used for.
On the Alpha this is true only for the unaligned modes. We can
@@ -1297,22 +1557,22 @@ __enable_execute_stack (addr) \
#define ADDRESS_COST(X) 0
-/* Define this if some processing needs to be done immediately before
- emitting code for an insn. */
-
-/* #define FINAL_PRESCAN_INSN(INSN,OPERANDS,NOPERANDS) */
+/* Machine-dependent reorg pass. */
+#define MACHINE_DEPENDENT_REORG(X) alpha_reorg(X)
/* Specify the machine mode that this machine uses
for the index in the tablejump instruction. */
#define CASE_VECTOR_MODE SImode
-/* Define this if the tablejump instruction expects the table
- to contain offsets from the address of the table.
+/* Define as a C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+
Do not define this if the table should contain absolute addresses.
On the Alpha, the table is really GP-relative, not relative to the PC
of the table, but we pretend that it is PC-relative; this should be OK,
but we should try to find some better way sometime. */
-#define CASE_VECTOR_PC_RELATIVE
+#define CASE_VECTOR_PC_RELATIVE 1
/* Specify the tree operation to be used to convert reals to integers. */
#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
@@ -1336,6 +1596,12 @@ __enable_execute_stack (addr) \
#define MOVE_MAX 8
+/* Controls how many units are moved by expr.c before resorting to movstr.
+ Without byte/word accesses, we want no more than one; with them, several
+ single-byte accesses are better. */
+
+#define MOVE_RATIO (TARGET_BWX ? 7 : 2)
+
/* Largest number of bytes of an object that can be placed in a register.
On the Alpha we have plenty of registers, so use TImode. */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)
@@ -1357,7 +1623,7 @@ __enable_execute_stack (addr) \
will either zero-extend or sign-extend. The value of this macro should
be the code that says which one of the two operations is implicitly
done, NIL if none. */
-#define LOAD_EXTEND_OP(MODE) SIGN_EXTEND
+#define LOAD_EXTEND_OP(MODE) ((MODE) == SImode ? SIGN_EXTEND : ZERO_EXTEND)
/* Define if loading short immediate values into registers sign extends. */
#define SHORT_IMMEDIATES_SIGN_EXTEND
@@ -1373,7 +1639,7 @@ __enable_execute_stack (addr) \
/* Define the value returned by a floating-point comparison instruction. */
-#define FLOAT_STORE_FLAG_VALUE 0.5
+#define FLOAT_STORE_FLAG_VALUE (TARGET_FLOAT_VAX ? 0.5 : 2.0)
/* Canonicalize a comparison from one we don't have to one we do have. */
@@ -1424,6 +1690,9 @@ __enable_execute_stack (addr) \
our own exit function. */
#define HAVE_ATEXIT
+/* The EV4 is dual issue; EV5/EV6 are quad issue. */
+#define ISSUE_RATE (alpha_cpu == PROCESSOR_EV4 ? 2 : 4)
+
/* Compute the cost of computing a constant rtl expression RTX
whose rtx-code is CODE. The body of this macro is a portion
of a switch statement. If the code is computed here,
@@ -1440,7 +1709,9 @@ __enable_execute_stack (addr) \
if (INTVAL (RTX) >= 0 && INTVAL (RTX) < 256) \
return 0; \
case CONST_DOUBLE: \
- if (((OUTER_CODE) == PLUS && add_operand (RTX, VOIDmode)) \
+ if ((RTX) == CONST0_RTX (GET_MODE (RTX))) \
+ return 0; \
+ else if (((OUTER_CODE) == PLUS && add_operand (RTX, VOIDmode)) \
|| ((OUTER_CODE) == AND && and_operand (RTX, VOIDmode))) \
return 0; \
else if (add_operand (RTX, VOIDmode) || and_operand (RTX, VOIDmode)) \
@@ -1450,7 +1721,15 @@ __enable_execute_stack (addr) \
case CONST: \
case SYMBOL_REF: \
case LABEL_REF: \
- return COSTS_N_INSNS (3);
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ return COSTS_N_INSNS (3); \
+ case PROCESSOR_EV5: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (2); \
+ default: abort(); \
+ }
/* Provide the costs of a rtl expression. This is in the body of a
switch on CODE. */
@@ -1458,52 +1737,128 @@ __enable_execute_stack (addr) \
#define RTX_COSTS(X,CODE,OUTER_CODE) \
case PLUS: case MINUS: \
if (FLOAT_MODE_P (GET_MODE (X))) \
- return COSTS_N_INSNS (6); \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ return COSTS_N_INSNS (6); \
+ case PROCESSOR_EV5: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (4); \
+ default: abort(); \
+ } \
else if (GET_CODE (XEXP (X, 0)) == MULT \
&& const48_operand (XEXP (XEXP (X, 0), 1), VOIDmode)) \
return (2 + rtx_cost (XEXP (XEXP (X, 0), 0), OUTER_CODE) \
+ rtx_cost (XEXP (X, 1), OUTER_CODE)); \
break; \
case MULT: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- return COSTS_N_INSNS (6); \
- return COSTS_N_INSNS (23); \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (6); \
+ return COSTS_N_INSNS (23); \
+ case PROCESSOR_EV5: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (4); \
+ else if (GET_MODE (X) == DImode) \
+ return COSTS_N_INSNS (12); \
+ else \
+ return COSTS_N_INSNS (8); \
+ case PROCESSOR_EV6: \
+ if (FLOAT_MODE_P (GET_MODE (X))) \
+ return COSTS_N_INSNS (4); \
+ else \
+ return COSTS_N_INSNS (7); \
+ default: abort(); \
+ } \
case ASHIFT: \
if (GET_CODE (XEXP (X, 1)) == CONST_INT \
&& INTVAL (XEXP (X, 1)) <= 3) \
break; \
/* ... fall through ... */ \
- case ASHIFTRT: case LSHIFTRT: case IF_THEN_ELSE: \
- return COSTS_N_INSNS (2); \
+ case ASHIFTRT: case LSHIFTRT: \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ return COSTS_N_INSNS (2); \
+ case PROCESSOR_EV5: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (1); \
+ default: abort(); \
+ } \
+ case IF_THEN_ELSE: \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (2); \
+ case PROCESSOR_EV5: \
+ return COSTS_N_INSNS (1); \
+ default: abort(); \
+ } \
case DIV: case UDIV: case MOD: case UMOD: \
- if (GET_MODE (X) == SFmode) \
- return COSTS_N_INSNS (34); \
- else if (GET_MODE (X) == DFmode) \
- return COSTS_N_INSNS (63); \
- else \
- return COSTS_N_INSNS (70); \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ if (GET_MODE (X) == SFmode) \
+ return COSTS_N_INSNS (34); \
+ else if (GET_MODE (X) == DFmode) \
+ return COSTS_N_INSNS (63); \
+ else \
+ return COSTS_N_INSNS (70); \
+ case PROCESSOR_EV5: \
+ if (GET_MODE (X) == SFmode) \
+ return COSTS_N_INSNS (15); \
+ else if (GET_MODE (X) == DFmode) \
+ return COSTS_N_INSNS (22); \
+ else \
+ return COSTS_N_INSNS (70); /* ??? */ \
+ case PROCESSOR_EV6: \
+ if (GET_MODE (X) == SFmode) \
+ return COSTS_N_INSNS (12); \
+ else if (GET_MODE (X) == DFmode) \
+ return COSTS_N_INSNS (15); \
+ else \
+ return COSTS_N_INSNS (70); /* ??? */ \
+ default: abort(); \
+ } \
case MEM: \
- return COSTS_N_INSNS (3); \
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (3); \
+ case PROCESSOR_EV5: \
+ return COSTS_N_INSNS (2); \
+ default: abort(); \
+ } \
+ case NEG: case ABS: \
+ if (! FLOAT_MODE_P (GET_MODE (X))) \
+ break; \
+ /* ... fall through ... */ \
case FLOAT: case UNSIGNED_FLOAT: case FIX: case UNSIGNED_FIX: \
case FLOAT_EXTEND: case FLOAT_TRUNCATE: \
- return COSTS_N_INSNS (6); \
- case NEG: case ABS: \
- if (FLOAT_MODE_P (GET_MODE (X))) \
- return COSTS_N_INSNS (6); \
- break;
+ switch (alpha_cpu) \
+ { \
+ case PROCESSOR_EV4: \
+ return COSTS_N_INSNS (6); \
+ case PROCESSOR_EV5: \
+ case PROCESSOR_EV6: \
+ return COSTS_N_INSNS (4); \
+ default: abort(); \
+ }
/* Control the assembler format that we output. */
-/* Output at beginning of assembler file. */
-
-#define ASM_FILE_START(FILE) \
-{ \
- alpha_write_verstamp (FILE); \
- fprintf (FILE, "\t.set noreorder\n"); \
- fprintf (FILE, "\t.set volatile\n"); \
- fprintf (FILE, "\t.set noat\n"); \
- ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
-}
+/* We don't emit these labels, so as to avoid getting linker errors about
+ missing exception handling info. If we emit a gcc_compiled. label into
+ text, and the file has no code, then the DEC assembler gives us a zero
+ sized text section with no associated exception handling info. The
+ DEC linker sees this text section, and gives a warning saying that
+ the exception handling info is missing. */
+#define ASM_IDENTIFY_GCC(x)
+#define ASM_IDENTIFY_LANGUAGE(x)
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
@@ -1592,20 +1947,15 @@ literal_section () \
#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
do { fputs ("\t.globl ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
-/* This is how to output a reference to a user-level label named NAME.
- `assemble_name' uses this. */
+/* The prefix to add to user-visible assembler symbols. */
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- fprintf (FILE, "%s", NAME)
+#define USER_LABEL_PREFIX ""
/* This is how to output an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
- if ((PREFIX)[0] == 'L') \
- fprintf (FILE, "$%s%d:\n", & (PREFIX)[1], NUM + 32); \
- else \
- fprintf (FILE, "%s%d:\n", PREFIX, NUM);
+ fprintf (FILE, "$%s%d:\n", PREFIX, NUM)
/* This is how to output a label for a jump table. Arguments are the same as
for ASM_OUTPUT_INTERNAL_LABEL, except the insn for the jump table is
@@ -1620,10 +1970,12 @@ literal_section () \
This is suitable for output with `assemble_name'. */
#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
- if ((PREFIX)[0] == 'L') \
- sprintf (LABEL, "*$%s%d", & (PREFIX)[1], NUM + 32); \
- else \
- sprintf (LABEL, "*%s%d", PREFIX, NUM)
+ sprintf (LABEL, "*$%s%d", PREFIX, NUM)
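As a quick check of the simplified label macros, here is a stand-alone C illustration (not GCC code) of the strings they now produce; note that the old 'L' special case and the +32 bias are gone:

    #include <stdio.h>

    int
    main (void)
    {
      char label[32];

      sprintf (label, "*$%s%d", "L", 5);   /* ASM_GENERATE_INTERNAL_LABEL */
      printf ("%s\n", label);              /* prints: *$L5 */
      printf ("$%s%d:\n", "L", 5);         /* ASM_OUTPUT_INTERNAL_LABEL: $L5: */
      return 0;
    }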
+
+/* Check a floating-point value for validity for a particular machine mode. */
+
+#define CHECK_FLOAT_VALUE(MODE, D, OVERFLOW) \
+ ((OVERFLOW) = check_float_value (MODE, &D, OVERFLOW))
/* This is how to output an assembler line defining a `double' constant. */
@@ -1642,29 +1994,18 @@ literal_section () \
{ \
char str[30]; \
REAL_VALUE_TO_DECIMAL (VALUE, "%.20e", str); \
- fprintf (FILE, "\t.t_floating %s\n", str); \
+ fprintf (FILE, "\t.%c_floating %s\n", (TARGET_FLOAT_VAX)?'g':'t', str); \
} \
}
/* This is how to output an assembler line defining a `float' constant. */
-#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
- { \
- if (REAL_VALUE_ISINF (VALUE) \
- || REAL_VALUE_ISNAN (VALUE) \
- || REAL_VALUE_MINUS_ZERO (VALUE)) \
- { \
- long t; \
- REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
- fprintf (FILE, "\t.long 0x%lx\n", t & 0xffffffff); \
- } \
- else \
- { \
- char str[30]; \
- REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
- fprintf (FILE, "\t.s_floating %s\n", str); \
- } \
- }
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ do { \
+ long t; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
+ fprintf (FILE, "\t.long 0x%lx\n", t & 0xffffffff); \
+} while (0)
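The new definition always emits the raw single-precision bit pattern. A small stand-alone C sketch (assuming the host float is 32-bit IEEE single precision) shows the line produced for 1.0f:

    #include <stdio.h>
    #include <string.h>

    int
    main (void)
    {
      float f = 1.0f;
      unsigned int u;                        /* assumes 32-bit unsigned int */

      memcpy (&u, &f, sizeof u);             /* same bytes as the float */
      printf ("\t.long 0x%lx\n", (unsigned long) u & 0xffffffff);
      /* prints:  .long 0x3f800000  */
      return 0;
    }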
/* This is how to output an assembler line defining an `int' constant. */
@@ -1684,12 +2025,12 @@ literal_section () \
#define ASM_OUTPUT_SHORT(FILE,VALUE) \
fprintf (FILE, "\t.word %d\n", \
- (GET_CODE (VALUE) == CONST_INT \
+ (int)(GET_CODE (VALUE) == CONST_INT \
? INTVAL (VALUE) & 0xffff : (abort (), 0)))
#define ASM_OUTPUT_CHAR(FILE,VALUE) \
fprintf (FILE, "\t.byte %d\n", \
- (GET_CODE (VALUE) == CONST_INT \
+ (int)(GET_CODE (VALUE) == CONST_INT \
? INTVAL (VALUE) & 0xff : (abort (), 0)))
/* We use the default ASCII-output routine, except that we don't write more
@@ -1756,7 +2097,7 @@ literal_section () \
/* This is how to output an assembler line for a numeric constant byte. */
#define ASM_OUTPUT_BYTE(FILE,VALUE) \
- fprintf (FILE, "\t.byte 0x%x\n", (VALUE) & 0xff)
+ fprintf (FILE, "\t.byte 0x%x\n", (int) ((VALUE) & 0xff))
/* This is how to output an element of a case-vector that is absolute.
(Alpha does not use such vectors, but we must define this macro anyway.) */
@@ -1765,13 +2106,9 @@ literal_section () \
/* This is how to output an element of a case-vector that is relative. */
-#if WINDOWS_NT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
- fprintf (FILE, "\t.long $%d\n", (VALUE) + 32)
-#else
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
- fprintf (FILE, "\t.gprel32 $%d\n", (VALUE) + 32)
-#endif
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.%s $L%d\n", TARGET_WINDOWS_NT ? "long" : "gprel32", \
+ (VALUE))
/* This is how to output an assembler line
that says to advance the location counter
@@ -1816,6 +2153,36 @@ literal_section () \
#define ASM_OPEN_PAREN "("
#define ASM_CLOSE_PAREN ")"
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ char *fn_name = XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0); \
+ int reg; \
+ \
+ /* Mark end of prologue. */ \
+ output_end_prologue (FILE); \
+ \
+ /* Rely on the assembler to macro expand a large delta. */ \
+ reg = aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) ? 17 : 16; \
+ fprintf (FILE, "\tlda $%d,%ld($%d)\n", reg, (long)(DELTA), reg); \
+ \
+ if (current_file_function_operand (XEXP (DECL_RTL (FUNCTION), 0))) \
+ { \
+ fprintf (FILE, "\tbr $31,$"); \
+ assemble_name (FILE, fn_name); \
+ fprintf (FILE, "..ng\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tjmp $31,"); \
+ assemble_name (FILE, fn_name); \
+ fputc ('\n', FILE); \
+ } \
+} while (0)
+
+
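With a made-up delta of 8 and a made-up target function foo, the thunk macro above prints one of the following sequences, both taken straight from its fprintf calls (the operands are illustrative only):

    lda $16,8($16)
    br $31,$foo..ng

for a function in the same file, or

    lda $16,8($16)
    jmp $31,foo

otherwise. With an aggregate-valued FUNCTION the register would be $17 instead of $16.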
/* Define results of standard character escape sequences. */
#define TARGET_BELL 007
#define TARGET_BS 010
@@ -1832,9 +2199,41 @@ literal_section () \
#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
/* Determine which codes are valid without a following integer. These must
- not be alphabetic. */
+ not be alphabetic (the characters are chosen so that
+ PRINT_OPERAND_PUNCT_VALID_P translates into a simple range change when
+ using ASCII).
+
+ & Generates fp-rounding mode suffix: nothing for normal, 'c' for
+ chopped, 'm' for minus-infinity, and 'd' for dynamic rounding
+ mode. alpha_fprm controls which suffix is generated.
+
+ ' Generates trap-mode suffix for instructions that accept the
+ su suffix only (cmpt et al).
+
+ ` Generates trap-mode suffix for instructions that accept the
+ v and sv suffix. The only instruction that needs this is cvtql.
+
+ ( Generates trap-mode suffix for instructions that accept the
+ v, sv, and svi suffix. The only instruction that needs this
+ is cvttq.
-#define PRINT_OPERAND_PUNCT_VALID_P(CODE) 0
+ ) Generates trap-mode suffix for instructions that accept the
+ u, su, and sui suffix. This is the bulk of the IEEE floating
+ point instructions (addt et al).
+
+ + Generates trap-mode suffix for instructions that accept the
+ sui suffix (cvtqt and cvtqs).
+
+ , Generates single precision suffix for floating point
+ instructions (s for IEEE, f for VAX)
+
+ - Generates double precision suffix for floating point
+ instructions (t for IEEE, g for VAX)
+ */
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
+ ((CODE) == '&' || (CODE) == '`' || (CODE) == '\'' || (CODE) == '(' \
+ || (CODE) == ')' || (CODE) == '+' || (CODE) == ',' || (CODE) == '-')
/* Print a memory address as an operand to reference that memory location. */
@@ -1857,38 +2256,42 @@ literal_section () \
else \
abort (); \
\
- fprintf (FILE, "%d($%d)", offset, basereg); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, offset); \
+ fprintf (FILE, "($%d)", basereg); \
}
/* Define the codes that are matched by predicates in alpha.c. */
-#define PREDICATE_CODES \
- {"reg_or_0_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_6bit_operand", {SUBREG, REG, CONST_INT}}, \
- {"reg_or_8bit_operand", {SUBREG, REG, CONST_INT}}, \
- {"cint8_operand", {CONST_INT}}, \
- {"reg_or_cint_operand", {SUBREG, REG, CONST_INT}}, \
- {"add_operand", {SUBREG, REG, CONST_INT}}, \
- {"sext_add_operand", {SUBREG, REG, CONST_INT}}, \
- {"const48_operand", {CONST_INT}}, \
- {"and_operand", {SUBREG, REG, CONST_INT}}, \
- {"or_operand", {SUBREG, REG, CONST_INT}}, \
- {"mode_mask_operand", {CONST_INT}}, \
- {"mul8_operand", {CONST_INT}}, \
- {"mode_width_operand", {CONST_INT}}, \
- {"reg_or_fp0_operand", {SUBREG, REG, CONST_DOUBLE}}, \
- {"alpha_comparison_operator", {EQ, LE, LT, LEU, LTU}}, \
- {"signed_comparison_operator", {EQ, NE, LE, LT, GE, GT}}, \
- {"divmod_operator", {DIV, MOD, UDIV, UMOD}}, \
- {"fp0_operand", {CONST_DOUBLE}}, \
- {"current_file_function_operand", {SYMBOL_REF}}, \
- {"call_operand", {REG, SYMBOL_REF}}, \
- {"input_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
- SYMBOL_REF, CONST, LABEL_REF}}, \
- {"some_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
- SYMBOL_REF, CONST, LABEL_REF}}, \
- {"aligned_memory_operand", {MEM}}, \
- {"unaligned_memory_operand", {MEM}}, \
- {"any_memory_operand", {MEM}},
+#define PREDICATE_CODES \
+ {"reg_or_0_operand", {SUBREG, REG, CONST_INT}}, \
+ {"reg_or_6bit_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"reg_or_8bit_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"cint8_operand", {CONST_INT, CONSTANT_P_RTX}}, \
+ {"reg_or_cint_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"add_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"sext_add_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"const48_operand", {CONST_INT}}, \
+ {"and_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"or_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+ {"mode_mask_operand", {CONST_INT}}, \
+ {"mul8_operand", {CONST_INT}}, \
+ {"mode_width_operand", {CONST_INT}}, \
+ {"reg_or_fp0_operand", {SUBREG, REG, CONST_DOUBLE}}, \
+ {"alpha_comparison_operator", {EQ, LE, LT, LEU, LTU}}, \
+ {"alpha_swapped_comparison_operator", {EQ, GE, GT, GEU, GTU}}, \
+ {"signed_comparison_operator", {EQ, NE, LE, LT, GE, GT}}, \
+ {"divmod_operator", {DIV, MOD, UDIV, UMOD}}, \
+ {"fp0_operand", {CONST_DOUBLE}}, \
+ {"current_file_function_operand", {SYMBOL_REF}}, \
+ {"call_operand", {REG, SYMBOL_REF}}, \
+ {"input_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
+ SYMBOL_REF, CONST, LABEL_REF, CONSTANT_P_RTX}}, \
+ {"some_operand", {SUBREG, REG, MEM, CONST_INT, CONST_DOUBLE, \
+ SYMBOL_REF, CONST, LABEL_REF, CONSTANT_P_RTX}}, \
+ {"aligned_memory_operand", {MEM}}, \
+ {"unaligned_memory_operand", {MEM}}, \
+ {"reg_or_unaligned_mem_operand", {SUBREG, REG, MEM}}, \
+ {"any_memory_operand", {MEM}}, \
+ {"hard_fp_register_operand", {SUBREG, REG}},
/* Tell collect that the object format is ECOFF. */
#define OBJECT_FORMAT_COFF
@@ -1904,8 +2307,7 @@ literal_section () \
#define MIPS_DEBUGGING_INFO /* MIPS specific debugging info */
#ifndef PREFERRED_DEBUGGING_TYPE /* assume SDB_DEBUGGING_INFO */
-#define PREFERRED_DEBUGGING_TYPE \
- ((len > 1 && !strncmp (str, "ggdb", len)) ? DBX_DEBUG : SDB_DEBUG)
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
#endif
@@ -1937,13 +2339,17 @@ extern void alpha_output_lineno ();
alpha_output_filename (STREAM, NAME)
extern void alpha_output_filename ();
-
-/* mips-tfile.c limits us to strings of one page. */
-#define DBX_CONTIN_LENGTH 4000
+/* mips-tfile.c limits us to strings of one page. We must underestimate this
+ number, because the real length runs past this up to the next
+ continuation point. This is really a dbxout.c bug. */
+#define DBX_CONTIN_LENGTH 3000
/* By default, turn on GDB extensions. */
#define DEFAULT_GDB_EXTENSIONS 1
+/* Stabs-in-ECOFF can't handle dbxout_function_end(). */
+#define NO_DBX_FUNCTION_END 1
+
/* If we are smuggling stabs through the ALPHA ECOFF object
format, put a comment in front of the .stab<x> operation so
that the ALPHA assembler does not choke. The mips-tfile program
@@ -2011,47 +2417,7 @@ do { \
#define PUT_SDB_FUNCTION_END(LINE)
-#define PUT_SDB_EPILOGUE_END(NAME)
-
-/* No point in running CPP on our assembler output. */
-#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
-/* Don't pass -g to GNU as, because some versions don't accept this option. */
-#define ASM_SPEC "%{malpha-as:-g} -nocpp %{pg}"
-#else
-/* In OSF/1 v3.2c, the assembler by default does not output file names which
- causes mips-tfile to fail. Passing -g to the assembler fixes this problem.
- ??? Stricly speaking, we only need -g if the user specifies -g. Passing
- it always means that we get slightly larger than necessary object files
- if the user does not specify -g. If we don't pass -g, then mips-tfile
- will need to be fixed to work in this case. */
-#define ASM_SPEC "%{!mgas:-g} -nocpp %{pg}"
-#endif
-
-/* Specify to run a post-processor, mips-tfile after the assembler
- has run to stuff the ecoff debug information into the object file.
- This is needed because the Alpha assembler provides no way
- of specifying such information in the assembly file. */
-
-#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
-
-#define ASM_FINAL_SPEC "\
-%{malpha-as: %{!mno-mips-tfile: \
- \n mips-tfile %{v*: -v} \
- %{K: -I %b.o~} \
- %{!K: %{save-temps: -I %b.o~}} \
- %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
- %{.s:%i} %{!.s:%g.s}}}"
-
-#else
-#define ASM_FINAL_SPEC "\
-%{!mgas: %{!mno-mips-tfile: \
- \n mips-tfile %{v*: -v} \
- %{K: -I %b.o~} \
- %{!K: %{save-temps: -I %b.o~}} \
- %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
- %{.s:%i} %{!.s:%g.s}}}"
-
-#endif
+#define PUT_SDB_EPILOGUE_END(NAME) ((void)(NAME))
/* Macros for mips-tfile.c to encapsulate stabs in ECOFF, and for
mips-tdump.c to print them out.
@@ -2073,16 +2439,55 @@ do { \
#define ALIGN_SYMTABLE_OFFSET(OFFSET) (((OFFSET) + 7) & ~7)
-/* The system headers under OSF/1 are C++-aware. */
-#define NO_IMPLICIT_EXTERN_C
-
-/* Also define __LANGUAGE_C__ when running fix-header. */
-#define FIXPROTO_INIT(CPPFILE) cpp_define (CPPFILE, "__LANGUAGE_C__")
-
/* The linker will stick __main into the .init section. */
#define HAS_INIT_SECTION
#define LD_INIT_SWITCH "-init"
#define LD_FINI_SWITCH "-fini"
-/* We do want to link in libgcc when building shared libraries under OSF/1. */
-#define LIBGCC_SPEC "-lgcc"
+/* The system headers under Alpha systems are generally C++-aware. */
+#define NO_IMPLICIT_EXTERN_C
+
+/* Prototypes for alpha.c functions used in the md file & elsewhere. */
+extern struct rtx_def *get_unaligned_address ();
+extern void alpha_write_verstamp ();
+extern void alpha_reorg ();
+extern int check_float_value ();
+extern int direct_return ();
+extern int const48_operand ();
+extern int add_operand ();
+extern int and_operand ();
+extern int unaligned_memory_operand ();
+extern int zap_mask ();
+extern int current_file_function_operand ();
+extern int alpha_sa_size ();
+extern int alpha_adjust_cost ();
+extern void print_operand ();
+extern int reg_or_0_operand ();
+extern int reg_or_8bit_operand ();
+extern int mul8_operand ();
+extern int reg_or_6bit_operand ();
+extern int alpha_comparison_operator ();
+extern int alpha_swapped_comparison_operator ();
+extern int sext_add_operand ();
+extern int cint8_operand ();
+extern int mode_mask_operand ();
+extern int or_operand ();
+extern int mode_width_operand ();
+extern int reg_or_fp0_operand ();
+extern int signed_comparison_operator ();
+extern int fp0_operand ();
+extern int some_operand ();
+extern int input_operand ();
+extern int divmod_operator ();
+extern int call_operand ();
+extern int reg_or_cint_operand ();
+extern int hard_fp_register_operand ();
+extern void alpha_set_memflags ();
+extern int aligned_memory_operand ();
+extern void get_aligned_mem ();
+extern void alpha_expand_unaligned_load ();
+extern void alpha_expand_unaligned_store ();
+extern int alpha_expand_block_move ();
+extern int alpha_expand_block_clear ();
+extern void alpha_expand_prologue ();
+extern void alpha_expand_epilogue ();
diff --git a/contrib/gcc/config/alpha/alpha.md b/contrib/gcc/config/alpha/alpha.md
index 72c54ee..87ebf95 100644
--- a/contrib/gcc/config/alpha/alpha.md
+++ b/contrib/gcc/config/alpha/alpha.md
@@ -1,5 +1,5 @@
;; Machine description for DEC Alpha for GNU C compiler
-;; Copyright (C) 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
+;; Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
;; This file is part of GNU CC.
@@ -20,71 +20,396 @@
;; Boston, MA 02111-1307, USA.
;;- See file "rtl.def" for documentation on define_insn, match_*, et. al.
+
+;; Uses of UNSPEC in this file:
+;;
+;; 0 arg_home
+;; 1 cttz
+;; 2 insxh
+;; 3 mskxh
+;; 4 cvtlq
+;; 5 cvtql
+;;
+;; UNSPEC_VOLATILE:
+;;
+;; 0 imb
+;; 1 blockage
+;; 2 builtin_setjmp_receiver
+;; 3 builtin_longjmp
+;; 4 trapb
+;; 5 prologue_stack_probe_loop
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in alpha.h.
+
+(define_attr "cpu" "ev4,ev5,ev6"
+ (const (symbol_ref "alpha_cpu")))
+
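The cost macros in alpha.h switch on the same alpha_cpu variable, so the declarations this attribute must track look roughly like the sketch below (only the three enumerators and their order are implied by this patch; the chip-family comments are the usual ones):

    enum processor_type
    {
      PROCESSOR_EV4,    /* 21064 class */
      PROCESSOR_EV5,    /* 21164 class */
      PROCESSOR_EV6     /* 21264 class */
    };

    extern enum processor_type alpha_cpu;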
;; Define an insn type attribute. This is used in function unit delay
;; computations, among other purposes. For the most part, we use the names
;; defined in the EV4 documentation, but add a few that we have to know about
;; separately.
(define_attr "type"
- "ld,st,ibr,fbr,jsr,iaddlog,shiftcm,icmp,imull,imulq,fpop,fdivs,fdivt,ldsym,isubr"
- (const_string "shiftcm"))
-
-;; We include four function units: ABOX, which computes the address,
-;; BBOX, used for branches, EBOX, used for integer operations, and FBOX,
-;; used for FP operations.
-;;
-;; We assume that we have been successful in getting double issues and
-;; hence multiply all costs by two insns per cycle. The minimum time in
-;; a function unit is 2 cycle, which will tend to produce the double
-;; issues.
+ "ild,fld,ldsym,ist,fst,ibr,fbr,jsr,iadd,ilog,shift,icmov,fcmov,icmp,imul,fadd,fmul,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof"
+ (const_string "iadd"))
-;; Memory delivers its result in three cycles.
-(define_function_unit "abox" 1 0 (eq_attr "type" "ld,ldsym,st") 6 2)
+;; Define the operand size an insn operates on. Used primarily by mul
+;; and div operations that have size-dependent timings.
-;; Branches have no delay cost, but do tie up the unit for two cycles.
-(define_function_unit "bbox" 1 1 (eq_attr "type" "ibr,fbr,jsr") 4 4)
+(define_attr "opsize" "si,di,udi" (const_string "di"))
-;; Arithmetic insns are normally have their results available after two
-;; cycles. There are a number of exceptions. They are encoded in
-;; ADJUST_COST. Some of the other insns have similar exceptions.
+;; The TRAP_TYPE attribute marks instructions that may generate traps
+;; (which are imprecise and may need a trapb if software completion
+;; is desired).
-(define_function_unit "ebox" 1 0 (eq_attr "type" "iaddlog,shiftcm,icmp") 4 2)
+(define_attr "trap" "no,yes" (const_string "no"))
-;; These really don't take up the integer pipeline, but they do occupy
-;; IBOX1; we approximate here.
+;; The length of an instruction sequence in bytes.
-(define_function_unit "ebox" 1 0 (eq_attr "type" "imull") 42 2)
-(define_function_unit "ebox" 1 0 (eq_attr "type" "imulq") 46 2)
-
-(define_function_unit "imult" 1 0 (eq_attr "type" "imull") 42 38)
-(define_function_unit "imult" 1 0 (eq_attr "type" "imulq") 46 42)
-
-(define_function_unit "fbox" 1 0 (eq_attr "type" "fpop") 12 2)
-
-(define_function_unit "fbox" 1 0 (eq_attr "type" "fdivs") 68 0)
-(define_function_unit "fbox" 1 0 (eq_attr "type" "fdivt") 126 0)
+(define_attr "length" "" (const_int 4))
+
+;; On EV4 there are two classes of resources to consider: resources needed
+;; to issue, and resources needed to execute. IBUS[01] are in the first
+;; category. ABOX, BBOX, EBOX, FBOX, IMUL & FDIV make up the second.
+;; (There are a few other register-like resources, but ...)
+
+; First, describe all of the issue constraints with single cycle delays.
+; All insns need a bus, but all except loads require one or the other.
+(define_function_unit "ev4_ibus0" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "fst,fbr,iadd,imul,ilog,shift,icmov,icmp"))
+ 1 1)
+
+(define_function_unit "ev4_ibus1" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "ist,ibr,jsr,fadd,fcmov,fcpys,fmul,fdiv,misc"))
+ 1 1)
+
+; Memory delivers its result in three cycles. Actually return one and
+; take care of this in adjust_cost, since we want to handle user-defined
+; memory latencies.
+(define_function_unit "ev4_abox" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "ild,fld,ldsym,ist,fst"))
+ 1 1)
+
+; Branches have no delay cost, but do tie up the unit for two cycles.
+(define_function_unit "ev4_bbox" 1 1
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "ibr,fbr,jsr"))
+ 2 2)
+
+; Arithmetic insns normally have their results available after
+; two cycles. There are a number of exceptions. They are encoded in
+; ADJUST_COST. Some of the other insns have similar exceptions.
+(define_function_unit "ev4_ebox" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "iadd,ilog,shift,icmov,icmp,misc"))
+ 2 1)
+
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "si")))
+ 21 19)
+
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "!si")))
+ 23 21)
+
+(define_function_unit "ev4_fbox" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (eq_attr "type" "fadd,fmul,fcpys,fcmov"))
+ 6 1)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ 34 30)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev4")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ 63 59)
+
+;; EV5 scheduling. EV5 can issue 4 insns per clock.
+;;
+;; EV5 has two asymmetric integer units. Model this with E0 & E1 along
+;; with the combined resource EBOX.
+
+(define_function_unit "ev5_ebox" 2 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "!fbr,fcmov,fadd,fmul,fcpys,fdiv"))
+ 1 1)
+
+; Memory takes at least 2 clocks. Return one from here and fix up with
+; user-defined latencies in adjust_cost.
+; ??? How to: "An instruction of class LD cannot be issued in the _second_
+; cycle after an instruction of class ST is issued."
+(define_function_unit "ev5_ebox" 2 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "ild,fld,ldsym"))
+ 1 1)
+
+; Stores, shifts, multiplies can only issue to E0
+(define_function_unit "ev5_e0" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "ist,fst,shift,imul"))
+ 1 1)
+
+; Motion video insns also issue only to E0, and take two ticks.
+(define_function_unit "ev5_e0" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "mvi"))
+ 2 1)
+
+; Conditional moves always take 2 ticks.
+(define_function_unit "ev5_ebox" 2 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "icmov"))
+ 2 1)
+
+; Branches can only issue to E1
+(define_function_unit "ev5_e1" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "ibr,jsr"))
+ 1 1)
+
+; Multiplies also use the integer multiplier.
+; ??? How to: "No instruction can be issued to pipe E0 exactly two
+; cycles before an integer multiplication completes."
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "si")))
+ 8 4)
+
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "di")))
+ 12 8)
+
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (and (eq_attr "type" "imul")
+ (eq_attr "opsize" "udi")))
+ 14 8)
+
+;; Similarly for the FPU we have two asymmetric units. But fcpys can issue
+;; on either so we have to play the game again.
+
+(define_function_unit "ev5_fbox" 2 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "fadd,fcmov,fmul,fcpys,fbr,fdiv"))
+ 4 1)
+
+(define_function_unit "ev5_fm" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "fmul"))
+ 4 1)
+
+; Add and cmov as you would expect; fbr never produces a result;
+; fdiv issues through fa to the divider.
+(define_function_unit "ev5_fa" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (eq_attr "type" "fadd,fcmov,fbr,fdiv"))
+ 4 1)
+
+; ??? How to: "No instruction can be issued to pipe FA exactly five
+; cycles before a floating point divide completes."
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ 15 15) ; 15 to 31 data dependent
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev5")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ 22 22) ; 22 to 60 data dependent
+
+;; EV6 scheduling. EV6 can issue 4 insns per clock.
+;;
+;; EV6 has two symmetric pairs ("clusters") of two asymmetric integer units
+;; ("upper" and "lower"), yielding pipe names U0, U1, L0, L1.
+
+;; Conditional moves decompose into two independent primitives, each
+;; taking one cycle. Since ev6 is out-of-order, we can't see anything
+;; but two cycles.
+(define_function_unit "ev6_ebox" 4 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "icmov"))
+ 2 1)
+
+(define_function_unit "ev6_ebox" 4 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "!fbr,fcmov,fadd,fmul,fcpys,fdiv,fsqrt"))
+ 1 1)
+
+;; Integer loads take at least 3 clocks, and only issue to lower units.
+;; Return one from here and fix up with user-defined latencies in adjust_cost.
+(define_function_unit "ev6_l" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "ild,ldsym,ist,fst"))
+ 1 1)
+
+;; FP loads take at least 4 clocks. Return two from here...
+(define_function_unit "ev6_l" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "fld"))
+ 2 1)
+
+;; Motion video insns also issue only to U0, and take three ticks.
+(define_function_unit "ev6_u0" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "mvi"))
+ 3 1)
+
+(define_function_unit "ev6_u" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "mvi"))
+ 3 1)
+
+;; Shifts issue to either upper pipe.
+(define_function_unit "ev6_u" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+;; Multiplies issue only to U1, and all take 7 ticks.
+;; Rather than create a new function unit just for U1, reuse IMUL
+(define_function_unit "imul" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "imul"))
+ 7 1)
+
+(define_function_unit "ev6_u" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "imul"))
+ 7 1)
+
+;; Branches issue to either upper pipe
+(define_function_unit "ev6_u" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "ibr"))
+ 3 1)
+
+;; Calls only issue to L0.
+(define_function_unit "ev6_l0" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "jsr"))
+ 1 1)
+
+(define_function_unit "ev6_l" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "jsr"))
+ 1 1)
+
+;; Ftoi/itof only issue to lower pipes
+(define_function_unit "ev6_l" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "ftoi"))
+ 3 1)
+
+(define_function_unit "ev6_l" 2 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "itof"))
+ 4 1)
+
+;; For the FPU we are very similar to EV5, except there's no insn that
+;; can issue to fm & fa, so we get to leave that out.
+
+(define_function_unit "ev6_fm" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "fmul"))
+ 4 1)
+
+(define_function_unit "ev6_fa" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "fadd,fcpys,fbr,fdiv,fsqrt"))
+ 4 1)
+
+(define_function_unit "ev6_fa" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (eq_attr "type" "fcmov"))
+ 8 1)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "si")))
+ 12 10)
+
+(define_function_unit "fdiv" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (and (eq_attr "type" "fdiv")
+ (eq_attr "opsize" "di")))
+ 15 13)
+
+(define_function_unit "fsqrt" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "opsize" "si")))
+ 16 14)
+
+(define_function_unit "fsqrt" 1 0
+ (and (eq_attr "cpu" "ev6")
+ (and (eq_attr "type" "fsqrt")
+ (eq_attr "opsize" "di")))
+ 32 30)
+
+; ??? The FPU communicates with memory and the integer register file
+; via two fp store units. We need a slot in the fst immediately, and
+; a slot in LOW after the operand data is ready. At which point the
+; data may be moved either to the store queue or the integer register
+; file and the insn retired.
-(define_function_unit "divider" 1 0 (eq_attr "type" "fdivs") 68 60)
-(define_function_unit "divider" 1 0 (eq_attr "type" "fdivt") 126 118)
;; First define the arithmetic insns. Note that the 32-bit forms also
;; sign-extend.
-;; Note that we can do sign extensions in both FP and integer registers.
-;; However, the result must be in the same type of register as the input.
-;; The register preferencing code can't handle this case very well, so, for
-;; now, don't let the FP case show up here for preferencing. Also,
-;; sign-extends in FP registers take two instructions.
+;; Handle 32-64 bit extension from memory to a floating point register
+;; specially, since this occurs frequently in int->double conversions.
+;; This is done with a define_split after reload converting the plain
+;; sign-extension into a load+unspec, which of course results in lds+cvtlq.
+;;
+;; Note that while we must retain the =f case in the insn for reload's
+;; benefit, it should be eliminated after reload, so we should never emit
+;; code for that case. But we don't reject the possibility.
+
(define_insn "extendsidi2"
- [(set (match_operand:DI 0 "register_operand" "=r,r,*f")
- (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,*f")))]
+ [(set (match_operand:DI 0 "register_operand" "=r,r,?f")
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "r,m,m")))]
""
"@
addl %1,$31,%0
ldl %0,%1
- cvtql %1,%0\;cvtlq %0,%0"
- [(set_attr "type" "iaddlog,ld,fpop")])
+ lds %0,%1\;cvtlq %0,%0"
+ [(set_attr "type" "iadd,ild,fld")
+ (set_attr "length" "*,*,8")])
+
+;; Due to issues with CLASS_CANNOT_CHANGE_SIZE, we cannot use a subreg here.
+(define_split
+ [(set (match_operand:DI 0 "hard_fp_register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "memory_operand" "")))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 1))
+ (set (match_dup 0) (unspec:DI [(match_dup 2)] 4))]
+ "operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]));")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:SI 1 "register_operand" "f")] 4))]
+ ""
+ "cvtlq %1,%0"
+ [(set_attr "type" "fadd")])
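With made-up operands (an FP destination $f10 and a stack slot 16($30)), the third alternative of extendsidi2 therefore assembles to exactly the pair the comment promises:

    lds $f10,16($30)
    cvtlq $f10,$f10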
;; Do addsi3 the way expand_binop would do if we didn't have one. This
;; generates better code. We have the anonymous addsi3 pattern below in
@@ -95,10 +420,10 @@
(match_operand:SI 2 "add_operand" "")))]
""
"
-{ emit_insn (gen_rtx (SET, VOIDmode, gen_lowpart (DImode, operands[0]),
- gen_rtx (PLUS, DImode,
- gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2]))));
+{ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (DImode, operands[0]),
+ gen_rtx_PLUS (DImode,
+ gen_lowpart (DImode, operands[1]),
+ gen_lowpart (DImode, operands[2]))));
DONE;
} ")
@@ -111,8 +436,7 @@
addl %r1,%2,%0
subl %r1,%n2,%0
lda %0,%2(%r1)
- ldah %0,%h2(%r1)"
- [(set_attr "type" "iaddlog")])
+ ldah %0,%h2(%r1)")
(define_split
[(set (match_operand:SI 0 "register_operand" "")
@@ -139,8 +463,7 @@
""
"@
addl %r1,%2,%0
- subl %r1,%n2,%0"
- [(set_attr "type" "iaddlog")])
+ subl %r1,%n2,%0")
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -179,8 +502,8 @@
(set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 7) (match_dup 4))))]
"
{
- operands[6] = gen_rtx (GET_CODE (operands[1]), DImode,
- operands[2], operands[3]);
+ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
operands[7] = gen_lowpart (SImode, operands[5]);
}")
@@ -193,8 +516,7 @@
addq %r1,%2,%0
subq %r1,%n2,%0
lda %0,%2(%r1)
- ldah %0,%h2(%r1)"
- [(set_attr "type" "iaddlog")])
+ ldah %0,%h2(%r1)")
;; Don't do this if we are adjusting SP since we don't want to do
;; it in two steps.
@@ -224,8 +546,7 @@
""
"@
s%2addl %r1,%3,%0
- s%2subl %r1,%n3,%0"
- [(set_attr "type" "iaddlog")])
+ s%2subl %r1,%n3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
@@ -236,8 +557,7 @@
""
"@
s%2addl %r1,%3,%0
- s%2subl %r1,%n3,%0"
- [(set_attr "type" "iaddlog")])
+ s%2subl %r1,%n3,%0")
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -255,8 +575,8 @@
(match_dup 5))))]
"
{
- operands[7] = gen_rtx (GET_CODE (operands[1]), DImode,
- operands[2], operands[3]);
+ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode,
+ operands[2], operands[3]);
operands[8] = gen_lowpart (SImode, operands[6]);
}")
@@ -268,8 +588,7 @@
""
"@
s%2addq %r1,%3,%0
- s%2subq %1,%n3,%0"
- [(set_attr "type" "iaddlog")])
+ s%2subq %1,%n3,%0")
;; These variants of the above insns can occur if the third operand
;; is the frame pointer. This is a kludge, but there doesn't
@@ -281,8 +600,7 @@
(match_operand:DI 2 "some_operand" "r"))
(match_operand:DI 3 "some_operand" "rIOKL")))]
"reload_in_progress"
- "#"
- [(set_attr "type" "iaddlog")])
+ "#")
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -301,8 +619,7 @@
(match_operand:SI 3 "some_operand" "r"))
(match_operand:SI 4 "some_operand" "rIOKL")))]
"reload_in_progress"
- "#"
- [(set_attr "type" "iaddlog")])
+ "#")
(define_split
[(set (match_operand:SI 0 "register_operand" "r")
@@ -325,8 +642,7 @@
(match_operand:SI 3 "some_operand" "r"))
(match_operand:SI 4 "some_operand" "rIOKL"))))]
"reload_in_progress"
- "#"
- [(set_attr "type" "iaddlog")])
+ "#")
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -351,8 +667,7 @@
(match_operand:DI 3 "some_operand" "r"))
(match_operand:DI 4 "some_operand" "rIOKL")))]
"reload_in_progress"
- "#"
- [(set_attr "type" "iaddlog")])
+ "#")
(define_split
[(set (match_operand:DI 0 "register_operand" "=")
@@ -370,23 +685,20 @@
[(set (match_operand:SI 0 "register_operand" "=r")
(neg:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))]
""
- "subl $31,%1,%0"
- [(set_attr "type" "iaddlog")])
+ "subl $31,%1,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (neg:SI
(match_operand:SI 1 "reg_or_8bit_operand" "rI"))))]
""
- "subl $31,%1,%0"
- [(set_attr "type" "iaddlog")])
+ "subl $31,%1,%0")
(define_insn "negdi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(neg:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
""
- "subq $31,%1,%0"
- [(set_attr "type" "iaddlog")])
+ "subq $31,%1,%0")
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "")
@@ -394,12 +706,11 @@
(match_operand:SI 2 "reg_or_8bit_operand" "")))]
""
"
-{ emit_insn (gen_rtx (SET, VOIDmode, gen_lowpart (DImode, operands[0]),
- gen_rtx (MINUS, DImode,
- gen_lowpart (DImode, operands[1]),
- gen_lowpart (DImode, operands[2]))));
+{ emit_insn (gen_rtx_SET (VOIDmode, gen_lowpart (DImode, operands[0]),
+ gen_rtx_MINUS (DImode,
+ gen_lowpart (DImode, operands[1]),
+ gen_lowpart (DImode, operands[2]))));
DONE;
-
} ")
(define_insn ""
@@ -407,24 +718,21 @@
(minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
(match_operand:SI 2 "reg_or_8bit_operand" "rI")))]
""
- "subl %r1,%2,%0"
- [(set_attr "type" "iaddlog")])
+ "subl %r1,%2,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(sign_extend:DI (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
(match_operand:SI 2 "reg_or_8bit_operand" "rI"))))]
""
- "subl %r1,%2,%0"
- [(set_attr "type" "iaddlog")])
+ "subl %r1,%2,%0")
(define_insn "subdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
(match_operand:DI 2 "reg_or_8bit_operand" "rI")))]
""
- "subq %r1,%2,%0"
- [(set_attr "type" "iaddlog")])
+ "subq %r1,%2,%0")
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -432,8 +740,7 @@
(match_operand:SI 2 "const48_operand" "I"))
(match_operand:SI 3 "reg_or_8bit_operand" "rI")))]
""
- "s%2subl %r1,%3,%0"
- [(set_attr "type" "iaddlog")])
+ "s%2subl %r1,%3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -442,8 +749,7 @@
(match_operand:SI 2 "const48_operand" "I"))
(match_operand:SI 3 "reg_or_8bit_operand" "rI"))))]
""
- "s%2subl %r1,%3,%0"
- [(set_attr "type" "iaddlog")])
+ "s%2subl %r1,%3,%0")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -451,8 +757,7 @@
(match_operand:DI 2 "const48_operand" "I"))
(match_operand:DI 3 "reg_or_8bit_operand" "rI")))]
""
- "s%2subq %r1,%3,%0"
- [(set_attr "type" "iaddlog")])
+ "s%2subq %r1,%3,%0")
(define_insn "mulsi3"
[(set (match_operand:SI 0 "register_operand" "=r")
@@ -460,7 +765,8 @@
(match_operand:SI 2 "reg_or_0_operand" "rJ")))]
""
"mull %r1,%r2,%0"
- [(set_attr "type" "imull")])
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "si")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -468,7 +774,8 @@
(match_operand:SI 2 "reg_or_0_operand" "rJ"))))]
""
"mull %r1,%r2,%0"
- [(set_attr "type" "imull")])
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "si")])
(define_insn "muldi3"
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -476,7 +783,7 @@
(match_operand:DI 2 "reg_or_0_operand" "rJ")))]
""
"mulq %r1,%r2,%0"
- [(set_attr "type" "imulq")])
+ [(set_attr "type" "imul")])
(define_insn "umuldi3_highpart"
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -487,7 +794,8 @@
(const_int 64))))]
""
"umulh %1,%2,%0"
- [(set_attr "type" "imulq")])
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "udi")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -498,61 +806,71 @@
(const_int 64))))]
""
"umulh %1,%2,%0"
- [(set_attr "type" "imulq")])
+ [(set_attr "type" "imul")
+ (set_attr "opsize" "udi")])
;; The divide and remainder operations always take their inputs from
;; r24 and r25, put their output in r27, and clobber r23 and r28.
+;; ??? Force sign-extension here because some versions of OSF/1 don't
+;; do the right thing if the inputs are not properly sign-extended.
+;; But Linux, for instance, does not have this problem. Is it worth
+;; the complication here to eliminate the sign extension?
+
(define_expand "divsi3"
- [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
- (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
- (parallel [(set (reg:SI 27)
- (div:SI (reg:SI 24)
- (reg:SI 25)))
+ [(set (reg:DI 24)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (reg:DI 25)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (reg:DI 27)
+ (sign_extend:DI (div:SI (reg:DI 24) (reg:DI 25))))
(clobber (reg:DI 23))
(clobber (reg:DI 28))])
(set (match_operand:SI 0 "general_operand" "")
- (reg:SI 27))]
- ""
+ (subreg:SI (reg:DI 27) 0))]
+ "!TARGET_OPEN_VMS"
"")
(define_expand "udivsi3"
- [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
- (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
- (parallel [(set (reg:SI 27)
- (udiv:SI (reg:SI 24)
- (reg:SI 25)))
+ [(set (reg:DI 24)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (reg:DI 25)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (reg:DI 27)
+ (sign_extend:DI (udiv:SI (reg:DI 24) (reg:DI 25))))
(clobber (reg:DI 23))
(clobber (reg:DI 28))])
(set (match_operand:SI 0 "general_operand" "")
- (reg:SI 27))]
- ""
+ (subreg:SI (reg:DI 27) 0))]
+ "!TARGET_OPEN_VMS"
"")
(define_expand "modsi3"
- [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
- (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
- (parallel [(set (reg:SI 27)
- (mod:SI (reg:SI 24)
- (reg:SI 25)))
+ [(set (reg:DI 24)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (reg:DI 25)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (reg:DI 27)
+ (sign_extend:DI (mod:SI (reg:DI 24) (reg:DI 25))))
(clobber (reg:DI 23))
(clobber (reg:DI 28))])
(set (match_operand:SI 0 "general_operand" "")
- (reg:SI 27))]
- ""
+ (subreg:SI (reg:DI 27) 0))]
+ "!TARGET_OPEN_VMS"
"")
(define_expand "umodsi3"
- [(set (reg:SI 24) (match_operand:SI 1 "input_operand" ""))
- (set (reg:SI 25) (match_operand:SI 2 "input_operand" ""))
- (parallel [(set (reg:SI 27)
- (umod:SI (reg:SI 24)
- (reg:SI 25)))
+ [(set (reg:DI 24)
+ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand" "")))
+ (set (reg:DI 25)
+ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "")))
+ (parallel [(set (reg:DI 27)
+ (sign_extend:DI (umod:SI (reg:DI 24) (reg:DI 25))))
(clobber (reg:DI 23))
(clobber (reg:DI 28))])
(set (match_operand:SI 0 "general_operand" "")
- (reg:SI 27))]
- ""
+ (subreg:SI (reg:DI 27) 0))]
+ "!TARGET_OPEN_VMS"
"")
(define_expand "divdi3"
@@ -565,7 +883,7 @@
(clobber (reg:DI 28))])
(set (match_operand:DI 0 "general_operand" "")
(reg:DI 27))]
- ""
+ "!TARGET_OPEN_VMS"
"")
(define_expand "udivdi3"
@@ -578,7 +896,7 @@
(clobber (reg:DI 28))])
(set (match_operand:DI 0 "general_operand" "")
(reg:DI 27))]
- ""
+ "!TARGET_OPEN_VMS"
"")
(define_expand "moddi3"
@@ -591,7 +909,7 @@
(clobber (reg:DI 28))])
(set (match_operand:DI 0 "general_operand" "")
(reg:DI 27))]
- ""
+ "!TARGET_OPEN_VMS"
"")
(define_expand "umoddi3"
@@ -604,18 +922,21 @@
(clobber (reg:DI 28))])
(set (match_operand:DI 0 "general_operand" "")
(reg:DI 27))]
- ""
+ "!TARGET_OPEN_VMS"
"")
+;; Lengths of 8 for ldq $t12,__divq($gp); jsr $t9,($t12),__divq as
+;; expanded by the assembler.
(define_insn ""
- [(set (reg:SI 27)
- (match_operator:SI 1 "divmod_operator"
- [(reg:SI 24) (reg:SI 25)]))
+ [(set (reg:DI 27)
+ (sign_extend:DI (match_operator:SI 1 "divmod_operator"
+ [(reg:DI 24) (reg:DI 25)])))
(clobber (reg:DI 23))
(clobber (reg:DI 28))]
- ""
+ "!TARGET_OPEN_VMS"
"%E1 $24,$25,$27"
- [(set_attr "type" "isubr")])
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
(define_insn ""
[(set (reg:DI 27)
@@ -623,9 +944,10 @@
[(reg:DI 24) (reg:DI 25)]))
(clobber (reg:DI 23))
(clobber (reg:DI 28))]
- ""
+ "!TARGET_OPEN_VMS"
"%E1 $24,$25,$27"
- [(set_attr "type" "isubr")])
+ [(set_attr "type" "jsr")
+ (set_attr "length" "8")])
;; Next are the basic logical operations. These only exist in DImode.
@@ -638,12 +960,12 @@
and %r1,%2,%0
bic %r1,%N2,%0
zapnot %r1,%m2,%0"
- [(set_attr "type" "iaddlog,iaddlog,shiftcm")])
+ [(set_attr "type" "ilog,ilog,shift")])
-;; There are times when we can split and AND into two AND insns. This occurs
+;; There are times when we can split an AND into two AND insns. This occurs
;; when we can first clear any bytes and then clear anything else. For
;; example "I & 0xffff07" is "(I & 0xffffff) & 0xffffffffffffff07".
-;; Only to this when running on 64-bit host since the computations are
+;; Only do this when running on 64-bit host since the computations are
;; too messy otherwise.
(define_split
@@ -675,43 +997,103 @@
[(set (match_operand:HI 0 "register_operand" "=r")
(zero_extend:HI (match_operand:QI 1 "register_operand" "r")))]
""
- "zapnot %1,1,%0"
- [(set_attr "type" "iaddlog")])
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ and %1,0xff,%0
+ ldbu %0,%1"
+ [(set_attr "type" "ilog,ild")])
-(define_insn "zero_extendqisi2"
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
""
- "zapnot %1,1,%0"
- [(set_attr "type" "iaddlog")])
+ "")
-(define_insn "zero_extendqidi2"
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ and %1,0xff,%0
+ ldbu %0,%1"
+ [(set_attr "type" "ilog,ild")])
+
+(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "! TARGET_BWX"
+ "and %1,0xff,%0"
+ [(set_attr "type" "ilog")])
+
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "")))]
""
- "zapnot %1,1,%0"
- [(set_attr "type" "iaddlog")])
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ zapnot %1,3,%0
+ ldwu %0,%1"
+ [(set_attr "type" "shift,ild")])
-(define_insn "zero_extendhisi2"
+(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=r")
(zero_extend:SI (match_operand:HI 1 "register_operand" "r")))]
- ""
+ "! TARGET_BWX"
"zapnot %1,3,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "shift")])
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:HI 1 "nonimmediate_operand" "r,m")))]
+ "TARGET_BWX"
+ "@
+ zapnot %1,3,%0
+ ldwu %0,%1"
+ [(set_attr "type" "shift,ild")])
-(define_insn "zero_extendhidi2"
+(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI (match_operand:HI 1 "register_operand" "r")))]
""
"zapnot %1,3,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "shift")])
+
+(define_expand "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "")
(define_insn "zero_extendsidi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extend:DI (match_operand:SI 1 "register_operand" "r")))]
""
"zapnot %1,15,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "shift")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -719,7 +1101,7 @@
(match_operand:DI 2 "reg_or_0_operand" "rJ")))]
""
"bic %r2,%1,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
(define_insn "iordi3"
[(set (match_operand:DI 0 "register_operand" "=r,r")
@@ -729,14 +1111,14 @@
"@
bis %r1,%2,%0
ornot %r1,%N2,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
(define_insn "one_cmpldi2"
[(set (match_operand:DI 0 "register_operand" "=r")
(not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))]
""
"ornot $31,%1,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -744,7 +1126,7 @@
(match_operand:DI 2 "reg_or_0_operand" "rJ")))]
""
"ornot %r2,%1,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
(define_insn "xordi3"
[(set (match_operand:DI 0 "register_operand" "=r,r")
@@ -754,7 +1136,7 @@
"@
xor %r1,%2,%0
eqv %r1,%N2,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
@@ -762,14 +1144,40 @@
(match_operand:DI 2 "register_operand" "rI"))))]
""
"eqv %r1,%2,%0"
- [(set_attr "type" "iaddlog")])
+ [(set_attr "type" "ilog")])
+
+;; Handle the FFS insn if we support CIX.
+
+(define_expand "ffsdi2"
+ [(set (match_dup 2)
+ (unspec [(match_operand:DI 1 "register_operand" "")] 1))
+ (set (match_dup 3)
+ (plus:DI (match_dup 2) (const_int 1)))
+ (set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (eq (match_dup 1) (const_int 0))
+ (const_int 0) (match_dup 3)))]
+ "TARGET_CIX"
+ "
+{
+ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_reg_rtx (DImode);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec [(match_operand:DI 1 "register_operand" "r")] 1))]
+ "TARGET_CIX"
+ "cttz %1,%0"
+ ; EV6 classes all mvi and cttz/ctlz/popc insns as imisc, so just
+ ; reuse the existing mvi type name.
+ [(set_attr "type" "mvi")])
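The ffsdi2 expander above composes three RTL steps: the cttz unspec, an add of one, and an if_then_else that forces the answer to zero for a zero input. A self-contained C model (model_cttz below merely simulates the cttz instruction; it is not a GCC or libc routine):

    static long
    model_cttz (unsigned long x)
    {
      long n = 0;

      while (n < 64 && ((x >> n) & 1) == 0)
        n++;
      return n;                         /* 64 when x == 0, like cttz */
    }

    long
    model_ffsdi2 (unsigned long x)
    {
      long r = model_cttz (x) + 1;      /* (plus:DI (unspec cttz) (const_int 1)) */

      return x == 0 ? 0 : r;            /* the if_then_else:DI above */
    }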
;; Next come the shifts and the various extract and insert operations.
(define_insn "ashldi3"
[(set (match_operand:DI 0 "register_operand" "=r,r")
(ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ")
- (match_operand:DI 2 "reg_or_6bit_operand" "P,rI")))]
+ (match_operand:DI 2 "reg_or_6bit_operand" "P,rS")))]
""
"*
{
@@ -782,9 +1190,11 @@
return \"s%P2addq %r1,0,%0\";
case 1:
return \"sll %r1,%2,%0\";
+ default:
+ abort();
}
}"
- [(set_attr "type" "iaddlog,shiftcm")])
+ [(set_attr "type" "iadd,shift")])
;; ??? The following pattern is made by combine, but earlier phases
;; (specifically flow) can't handle it. This occurs in jump.c. Deal
@@ -803,224 +1213,469 @@
;; else
;; return \"s%P2addl %r1,0,%0\";
;; }"
-;; [(set_attr "type" "iaddlog")])
+;; [(set_attr "type" "iadd")])
(define_insn "lshrdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (match_operand:DI 2 "reg_or_6bit_operand" "rI")))]
+ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))]
""
- "srl %r1,%2,%0")
+ "srl %r1,%2,%0"
+ [(set_attr "type" "shift")])
(define_insn "ashrdi3"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (match_operand:DI 2 "reg_or_6bit_operand" "rI")))]
+ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))]
""
- "sra %r1,%2,%0")
+ "sra %r1,%2,%0"
+ [(set_attr "type" "shift")])
(define_expand "extendqihi2"
[(set (match_dup 2)
- (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
(const_int 56)))
(set (match_operand:HI 0 "register_operand" "")
(ashiftrt:DI (match_dup 2)
(const_int 56)))]
""
"
-{ operands[0] = gen_lowpart (DImode, operands[0]);
- operands[1] = gen_lowpart (DImode, operands[1]);
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqihi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to DImode (which we do
+ specially) and then copy to the result. */
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendqidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (HImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
operands[2] = gen_reg_rtx (DImode);
}")
+(define_insn "extendqidi2x"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendhidi2x"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextw %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendqisi2x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendhisi2x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextw %1,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extendqihi2x"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "r")))]
+ "TARGET_BWX"
+ "sextb %1,%0"
+ [(set_attr "type" "shift")])
+
(define_expand "extendqisi2"
[(set (match_dup 2)
- (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
(const_int 56)))
(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:DI (match_dup 2)
(const_int 56)))]
""
"
-{ operands[0] = gen_lowpart (DImode, operands[0]);
- operands[1] = gen_lowpart (DImode, operands[1]);
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqisi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to a DImode form of
+ the result (which we do specially). */
+ if (unaligned_memory_operand (operands[1], QImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendqidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (SImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
operands[2] = gen_reg_rtx (DImode);
}")
(define_expand "extendqidi2"
[(set (match_dup 2)
- (ashift:DI (match_operand:QI 1 "register_operand" "")
+ (ashift:DI (match_operand:QI 1 "some_operand" "")
(const_int 56)))
(set (match_operand:DI 0 "register_operand" "")
(ashiftrt:DI (match_dup 2)
(const_int 56)))]
""
"
-{ operands[1] = gen_lowpart (DImode, operands[1]);
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendqidi2x (operands[0],
+ force_reg (QImode, operands[1])));
+ DONE;
+ }
+
+ if (unaligned_memory_operand (operands[1], QImode))
+ {
+ rtx seq
+ = gen_unaligned_extendqidi (operands[0],
+ get_unaligned_address (operands[1], 1));
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (DImode, force_reg (QImode, operands[1]));
operands[2] = gen_reg_rtx (DImode);
}")
(define_expand "extendhisi2"
[(set (match_dup 2)
- (ashift:DI (match_operand:HI 1 "register_operand" "")
+ (ashift:DI (match_operand:HI 1 "some_operand" "")
(const_int 48)))
(set (match_operand:SI 0 "register_operand" "")
(ashiftrt:DI (match_dup 2)
(const_int 48)))]
""
"
-{ operands[0] = gen_lowpart (DImode, operands[0]);
- operands[1] = gen_lowpart (DImode, operands[1]);
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendhisi2x (operands[0],
+ force_reg (HImode, operands[1])));
+ DONE;
+ }
+
+ /* If we have an unaligned MEM, extend to a DImode form of
+ the result (which we do specially). */
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx temp = gen_reg_rtx (DImode);
+
+ emit_insn (gen_extendhidi2 (temp, operands[1]));
+ emit_move_insn (operands[0], gen_lowpart (SImode, temp));
+ DONE;
+ }
+
+ operands[0] = gen_lowpart (DImode, operands[0]);
+ operands[1] = gen_lowpart (DImode, force_reg (HImode, operands[1]));
operands[2] = gen_reg_rtx (DImode);
}")
(define_expand "extendhidi2"
[(set (match_dup 2)
- (ashift:DI (match_operand:HI 1 "register_operand" "")
+ (ashift:DI (match_operand:HI 1 "some_operand" "")
(const_int 48)))
(set (match_operand:DI 0 "register_operand" "")
(ashiftrt:DI (match_dup 2)
(const_int 48)))]
""
"
-{ operands[1] = gen_lowpart (DImode, operands[1]);
+{
+ if (TARGET_BWX)
+ {
+ emit_insn (gen_extendhidi2x (operands[0],
+ force_reg (HImode, operands[1])));
+ DONE;
+ }
+
+ if (unaligned_memory_operand (operands[1], HImode))
+ {
+ rtx seq
+ = gen_unaligned_extendhidi (operands[0],
+ get_unaligned_address (operands[1], 2));
+
+ alpha_set_memflags (seq, operands[1]);
+ emit_insn (seq);
+ DONE;
+ }
+
+ operands[1] = gen_lowpart (DImode, force_reg (HImode, operands[1]));
operands[2] = gen_reg_rtx (DImode);
}")
+;; Here's how we sign extend an unaligned byte and halfword. Doing this
+;; as a pattern saves one instruction. The code is similar to that for
+;; the unaligned loads (see below).
+;;
+;; Operand 1 is the address + 1 (+2 for HI), operand 0 is the result.
+(define_expand "unaligned_extendqidi"
+ [(set (match_dup 2) (match_operand:DI 1 "address_operand" ""))
+ (set (match_dup 3)
+ (mem:DI (and:DI (plus:DI (match_dup 2) (const_int -1))
+ (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (and:DI (plus:DI (match_dup 2) (const_int -1))
+ (const_int 7))
+ (const_int 3)))))
+ (set (subreg:DI (match_operand:QI 0 "register_operand" "") 0)
+ (ashiftrt:DI (match_dup 4) (const_int 56)))]
+ ""
+ "
+{ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+}")
+
+(define_expand "unaligned_extendhidi"
+ [(set (match_dup 2) (match_operand:DI 1 "address_operand" ""))
+ (set (match_dup 3)
+ (mem:DI (and:DI (plus:DI (match_dup 2) (const_int -2))
+ (const_int -8))))
+ (set (match_dup 4)
+ (ashift:DI (match_dup 3)
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (and:DI (plus:DI (match_dup 2) (const_int -1))
+ (const_int 7))
+ (const_int 3)))))
+ (set (subreg:DI (match_operand:QI 0 "register_operand" "") 0)
+ (ashiftrt:DI (match_dup 4) (const_int 48)))]
+ ""
+ "
+{ operands[2] = gen_reg_rtx (DImode);
+ operands[3] = gen_reg_rtx (DImode);
+ operands[4] = gen_reg_rtx (DImode);
+}")
+
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
(match_operand:DI 2 "mode_width_operand" "n")
(match_operand:DI 3 "mul8_operand" "I")))]
""
- "ext%M2l %r1,%s3,%0")
+ "ext%M2l %r1,%s3,%0"
+ [(set_attr "type" "shift")])
-(define_insn ""
+(define_insn "extxl"
[(set (match_operand:DI 0 "register_operand" "=r")
(zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
(match_operand:DI 2 "mode_width_operand" "n")
(ashift:DI (match_operand:DI 3 "reg_or_8bit_operand" "rI")
(const_int 3))))]
""
- "ext%M2l %r1,%3,%0")
+ "ext%M2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
-(define_insn ""
+(define_insn "extqh"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI
- (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (const_int 8)
- (ashift:DI
- (plus:DI
- (match_operand:DI 2 "reg_or_8bit_operand" "rI")
- (const_int -1))
- (const_int 3)))
- (const_int 56)))]
- ""
- "extqh %r1,%2,%0")
-
-(define_insn ""
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -1))
+ (const_int 7))
+ (const_int 3)))))]
+ ""
+ "extqh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extlh"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI
- (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (const_int 16)
- (ashift:DI
- (plus:DI
- (match_operand:DI 2 "reg_or_8bit_operand" "rI")
- (const_int -2))
- (const_int 3)))
- (const_int 48)))]
- ""
- "extwh %r1,%2,%0")
-
-(define_insn ""
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 2147483647))
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -1))
+ (const_int 7))
+ (const_int 3)))))]
+ ""
+ "extlh %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "extwh"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI
- (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
- (const_int 32)
- (ashift:DI
- (plus:DI
- (match_operand:DI 2 "reg_or_8bit_operand" "rI")
- (const_int -4))
- (const_int 3)))
- (const_int 32)))]
- ""
- "extlh %r1,%2,%0")
+ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (const_int 65535))
+ (minus:DI (const_int 56)
+ (ashift:DI
+ (and:DI
+ (plus:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int -1))
+ (const_int 7))
+ (const_int 3)))))]
+ ""
+ "extwh %r1,%2,%0"
+ [(set_attr "type" "shift")])
;; This converts an extXl into an extXh with an appropriate adjustment
;; to the address calculation.
-(define_split
- [(set (match_operand:DI 0 "register_operand" "")
- (ashift:DI (zero_extract:DI (match_operand:DI 1 "register_operand" "")
- (match_operand:DI 2 "mode_width_operand" "")
- (ashift:DI (match_operand:DI 3 "" "")
- (const_int 3)))
- (match_operand:DI 4 "const_int_operand" "")))
- (clobber (match_operand:DI 5 "register_operand" ""))]
- "INTVAL (operands[4]) == 64 - INTVAL (operands[2])"
- [(set (match_dup 5) (match_dup 6))
- (set (match_dup 0)
- (ashift:DI (zero_extract:DI (match_dup 1) (match_dup 2)
- (ashift:DI (plus:DI (match_dup 5)
- (match_dup 7))
- (const_int 3)))
- (match_dup 4)))]
- "
-{
- operands[6] = plus_constant (operands[3],
- INTVAL (operands[2]) / BITS_PER_UNIT);
- operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT);
-}")
+;;(define_split
+;; [(set (match_operand:DI 0 "register_operand" "")
+;; (ashift:DI (zero_extract:DI (match_operand:DI 1 "register_operand" "")
+;; (match_operand:DI 2 "mode_width_operand" "")
+;; (ashift:DI (match_operand:DI 3 "" "")
+;; (const_int 3)))
+;; (match_operand:DI 4 "const_int_operand" "")))
+;; (clobber (match_operand:DI 5 "register_operand" ""))]
+;; "INTVAL (operands[4]) == 64 - INTVAL (operands[2])"
+;; [(set (match_dup 5) (match_dup 6))
+;; (set (match_dup 0)
+;; (ashift:DI (zero_extract:DI (match_dup 1) (match_dup 2)
+;; (ashift:DI (plus:DI (match_dup 5)
+;; (match_dup 7))
+;; (const_int 3)))
+;; (match_dup 4)))]
+;; "
+;;{
+;; operands[6] = plus_constant (operands[3],
+;; INTVAL (operands[2]) / BITS_PER_UNIT);
+;; operands[7] = GEN_INT (- INTVAL (operands[2]) / BITS_PER_UNIT);
+;;}")
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
(match_operand:DI 2 "mul8_operand" "I")))]
""
- "insbl %1,%s2,%0")
+ "insbl %1,%s2,%0"
+ [(set_attr "type" "shift")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
(match_operand:DI 2 "mul8_operand" "I")))]
""
- "inswl %1,%s2,%0")
+ "inswl %1,%s2,%0"
+ [(set_attr "type" "shift")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(match_operand:DI 2 "mul8_operand" "I")))]
""
- "insll %1,%s2,%0")
+ "insll %1,%s2,%0"
+ [(set_attr "type" "shift")])
-(define_insn ""
+(define_insn "insbl"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:QI 1 "register_operand" "r"))
(ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
(const_int 3))))]
""
- "insbl %1,%2,%0")
+ "insbl %1,%2,%0"
+ [(set_attr "type" "shift")])
-(define_insn ""
+(define_insn "inswl"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" "r"))
(ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
(const_int 3))))]
""
- "inswl %1,%2,%0")
+ "inswl %1,%2,%0"
+ [(set_attr "type" "shift")])
-(define_insn ""
+(define_insn "insll"
[(set (match_operand:DI 0 "register_operand" "=r")
(ashift:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
(ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
(const_int 3))))]
""
- "insll %1,%2,%0")
+ "insll %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "insql"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (const_int 3))))]
+ ""
+ "insql %1,%2,%0"
+ [(set_attr "type" "shift")])
+
+;; Combine sometimes has the habit of moving the AND outside of the
+;; shift, making life more interesting.
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mul8_operand" "I"))
+ (match_operand:DI 3 "immediate_operand" "i")))]
+ "HOST_BITS_PER_WIDE_INT == 64
+ && GET_CODE (operands[3]) == CONST_INT
+ && (((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+ == INTVAL (operands[3]))
+ || ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+ == INTVAL (operands[3]))
+ || ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+ == INTVAL (operands[3])))"
+ "*
+{
+#if HOST_BITS_PER_WIDE_INT == 64
+ if ((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2])
+ == INTVAL (operands[3]))
+ return \"insbl %1,%s2,%0\";
+ if ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2])
+ == INTVAL (operands[3]))
+ return \"inswl %1,%s2,%0\";
+ if ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2])
+ == INTVAL (operands[3]))
+ return \"insll %1,%s2,%0\";
+#endif
+ abort();
+}"
+ [(set_attr "type" "shift")])
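+
+;; For example (a sketch, not an additional pattern): combine may hand us
+;; (x << 16) & 0xff0000 in place of the zero_extend form above; the mask
+;; equals 0xff shifted by the same amount, so the condition accepts it and
+;; we still emit an insbl with byte position 2 (the shift count over 8).
+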
;; We do not include the insXh insns because they are complex to express
;; and it does not appear that we would ever want to generate them.
+;;
+;; Since we need them for block moves, though, cop out and use unspec.
-(define_insn ""
+(define_insn "insxh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")] 2))]
+ ""
+ "ins%M2h %1,%3,%0"
+ [(set_attr "type" "shift")])
+
+(define_insn "mskxl"
[(set (match_operand:DI 0 "register_operand" "=r")
(and:DI (not:DI (ashift:DI
(match_operand:DI 2 "mode_mask_operand" "n")
@@ -1029,10 +1684,22 @@
(const_int 3))))
(match_operand:DI 1 "reg_or_0_operand" "rJ")))]
""
- "msk%U2l %r1,%3,%0")
+ "msk%U2l %r1,%3,%0"
+ [(set_attr "type" "shift")])
-;; We do not include the mskXh insns because it does not appear we would ever
-;; generate one.
+;; We do not include the mskXh insns because it does not appear we would
+;; ever generate one.
+;;
+;; Again, we need them for block moves, so we use unspec again.
+
+(define_insn "mskxh"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (unspec [(match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "mode_width_operand" "n")
+ (match_operand:DI 3 "reg_or_8bit_operand" "rI")] 3))]
+ ""
+ "msk%M2h %1,%3,%0"
+ [(set_attr "type" "shift")])
;; Floating-point operations. All the double-precision insns can extend
;; from single, so indicate that. The exceptions are the ones that simply
@@ -1043,53 +1710,74 @@
(abs:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
"cpys $f31,%R1,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcpys")])
(define_insn "absdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(abs:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
"cpys $f31,%R1,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcpys")])
(define_insn "negsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(neg:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
"cpysn %R1,%R1,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fadd")])
(define_insn "negdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(neg:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
"cpysn %R1,%R1,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fadd")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (plus:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "add%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "addsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(plus:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
(match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "adds %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "add%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (plus:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "add%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "adddf3"
[(set (match_operand:DF 0 "register_operand" "=f")
(plus:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "addt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "add%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(plus:DF (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP"
- "addt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "add%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1097,120 +1785,344 @@
(match_operand:SF 1 "reg_or_fp0_operand" "%fG"))
(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "add%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+;; Define conversion operators between DFmode and SImode, using the cvtql
+;; instruction. To allow combine et al to do useful things, we keep the
+;; operation as a unit until after reload, at which point we split the
+;; instructions.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "")))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_FP && reload_completed"
+ [(set (match_dup 2) (fix:DI (match_dup 1)))
+ (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ "")
+
+;; Due to issues with CLASS_CANNOT_CHANGE_SIZE, we cannot use a subreg here.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "")))]
+ "TARGET_FP && reload_completed"
+ [(set (match_dup 2) (fix:DI (match_dup 1)))
+ (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ "operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (unspec:SI [(match_operand:DI 1 "reg_or_fp0_operand" "fG")] 5))]
"TARGET_FP"
- "addt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "cvtql%` %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn "fix_truncdfsi2_tp"
+ [(set (match_operand:SI 0 "register_operand" "=&f")
+ (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))
+ (clobber (match_scratch:DI 2 "=&f"))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "#"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "#"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_expand "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP"
+ "{ if (alpha_tp == ALPHA_TP_INSN)
+ { emit_insn(gen_fix_truncdfsi2_tp(operands[0], operands[1])); DONE; }
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=&f")
+ (fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvt%-q%(c %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "fix_truncdfdi2"
[(set (match_operand:DI 0 "register_operand" "=f")
(fix:DI (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "cvttqc %R1,%0"
- [(set_attr "type" "fpop")])
+ "cvt%-q%(c %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+;; Likewise between SFmode and SImode.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" ""))))
+ (clobber (match_scratch:DI 2 ""))]
+ "TARGET_FP && reload_completed"
+ [(set (match_dup 2) (fix:DI (float_extend:DF (match_dup 1))))
+ (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ "")
+
+;; Due to issues with CLASS_CANNOT_CHANGE_SIZE, we cannot use a subreg here.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (fix:SI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" ""))))]
+ "TARGET_FP && reload_completed"
+ [(set (match_dup 2) (fix:DI (float_extend:DF (match_dup 1))))
+ (set (match_dup 0) (unspec:SI [(match_dup 2)] 5))]
+ "operands[2] = gen_rtx_REG (DImode, REGNO (operands[0]));")
+
+(define_insn "fix_truncsfsi2_tp"
+ [(set (match_operand:SI 0 "register_operand" "=&f")
+ (fix:SI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))
+ (clobber (match_scratch:DI 2 "=&f"))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "#"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "#"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_expand "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP"
+ "{ if (alpha_tp == ALPHA_TP_INSN)
+ { emit_insn(gen_fix_truncsfsi2_tp(operands[0], operands[1])); DONE; }
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=&f")
+ (fix:DI (float_extend:DF
+ (match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvt%-q%(c %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "fix_truncsfdi2"
[(set (match_operand:DI 0 "register_operand" "=f")
(fix:DI (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))))]
"TARGET_FP"
- "cvttqc %R1,%0"
- [(set_attr "type" "fpop")])
+ "cvt%-q%(c %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (float:SF (match_operand:DI 1 "register_operand" "f")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvtq%,%+%& %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "floatdisf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float:SF (match_operand:DI 1 "register_operand" "f")))]
"TARGET_FP"
- "cvtqs %1,%0"
- [(set_attr "type" "fpop")])
+ "cvtq%,%+%& %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (float:DF (match_operand:DI 1 "register_operand" "f")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvtq%-%+%& %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "floatdidf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(float:DF (match_operand:DI 1 "register_operand" "f")))]
"TARGET_FP"
- "cvtqt %1,%0"
- [(set_attr "type" "fpop")])
+ "cvtq%-%+%& %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
-(define_insn "extendsfdf2"
- [(set (match_operand:DF 0 "register_operand" "=f,f")
- (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m")))]
+(define_expand "extendsfdf2"
+ [(use (match_operand:DF 0 "register_operand" ""))
+ (use (match_operand:SF 1 "nonimmediate_operand" ""))]
"TARGET_FP"
+"
+{
+ if (alpha_tp == ALPHA_TP_INSN)
+ emit_insn (gen_extendsfdf2_tp (operands[0],
+ force_reg (SFmode, operands[1])));
+ else
+ emit_insn (gen_extendsfdf2_no_tp (operands[0], operands[1]));
+
+ DONE;
+}")
+;; FIXME
+(define_insn "extendsfdf2_tp"
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvtsts %1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn "extendsfdf2_no_tp"
+ [(set (match_operand:DF 0 "register_operand" "=f,f,m")
+ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))]
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
"@
- addt $f31,%1,%0
- lds %0,%1"
- [(set_attr "type" "fpop,ld")])
+ cpys %1,%1,%0
+ ld%, %0,%1
+ st%- %1,%0"
+ [(set_attr "type" "fcpys,fld,fst")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (float_truncate:SF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cvt%-%,%)%& %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "truncdfsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(float_truncate:SF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "cvtts %R1,%0"
- [(set_attr "type" "fpop")])
+ "cvt%-%,%)%& %R1,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (div:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "div%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")])
(define_insn "divsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(div:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
(match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "divs %R1,%R2,%0"
- [(set_attr "type" "fdivs")])
+ "div%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (div:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "div%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")])
(define_insn "divdf3"
[(set (match_operand:DF 0 "register_operand" "=f")
(div:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "divt %R1,%R2,%0"
- [(set_attr "type" "fdivt")])
+ "div%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(div:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP"
- "divt %R1,%R2,%0"
- [(set_attr "type" "fdivt")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "div%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(div:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "divt %R1,%R2,%0"
- [(set_attr "type" "fdivt")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "div%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(div:DF (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(float_extend:DF (match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "divt %R1,%R2,%0"
- [(set_attr "type" "fdivt")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "div%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fdiv")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (mult:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "mul%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
(define_insn "mulsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(mult:SF (match_operand:SF 1 "reg_or_fp0_operand" "%fG")
(match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "muls %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "mul%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (mult:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "mul%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
(define_insn "muldf3"
[(set (match_operand:DF 0 "register_operand" "=f")
(mult:DF (match_operand:DF 1 "reg_or_fp0_operand" "%fG")
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "mult %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "mul%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(mult:DF (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP"
- "mult %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "mul%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1218,43 +2130,66 @@
(match_operand:SF 1 "reg_or_fp0_operand" "%fG"))
(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "mult %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "mul%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fmul")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (minus:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "sub%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "subsf3"
[(set (match_operand:SF 0 "register_operand" "=f")
(minus:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")
(match_operand:SF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "subs %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "sub%,%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (minus:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "sub%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn "subdf3"
[(set (match_operand:DF 0 "register_operand" "=f")
(minus:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
"TARGET_FP"
- "subt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "sub%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(minus:DF (float_extend:DF
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(match_operand:DF 2 "reg_or_fp0_operand" "fG")))]
- "TARGET_FP"
- "subt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "sub%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(minus:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")
(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "subt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "sub%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1262,9 +2197,44 @@
(match_operand:SF 1 "reg_or_fp0_operand" "fG"))
(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))))]
- "TARGET_FP"
- "subt %R1,%R2,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "sub%-%)%& %R1,%R2,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=&f")
+ (sqrt:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && TARGET_CIX && alpha_tp == ALPHA_TP_INSN"
+ "sqrt%,%)%& %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && TARGET_CIX"
+ "sqrt%,%)%& %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "opsize" "si")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (sqrt:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && TARGET_CIX && alpha_tp == ALPHA_TP_INSN"
+ "sqrt%-%)%& %R1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "trap" "yes")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=f")
+ (sqrt:DF (match_operand:DF 1 "reg_or_fp0_operand" "fG")))]
+ "TARGET_FP && TARGET_CIX"
+ "sqrt%-%)%& %1,%0"
+ [(set_attr "type" "fsqrt")
+ (set_attr "trap" "yes")])
;; Next are all the integer comparisons, and conditional moves and branches
;; and some of the related define_expand's and define_split's.
@@ -1278,31 +2248,13 @@
"cmp%C1 %r2,%3,%0"
[(set_attr "type" "icmp")])
-;; There are three important special-case that don't fit the above pattern
-;; but which we want to handle here.
-
-(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ne:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))]
- ""
- "cmpult $31,%1,%0"
- [(set_attr "type" "icmp")])
-
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r")
- (gt:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))]
+ (match_operator:DI 1 "alpha_swapped_comparison_operator"
+ [(match_operand:DI 2 "reg_or_8bit_operand" "rI")
+ (match_operand:DI 3 "reg_or_0_operand" "rJ")]))]
""
- "cmplt $31,%1,%0"
- [(set_attr "type" "icmp")])
-
-(define_insn ""
- [(set (match_operand:DI 0 "register_operand" "=r")
- (ge:DI (match_operand:DI 1 "register_operand" "r")
- (const_int 0)))]
- ""
- "cmple $31,%1,%0"
+ "cmp%c1 %r3,%2,%0"
[(set_attr "type" "icmp")])
;; This pattern exists so conditional moves of SImode values are handled.
@@ -1321,7 +2273,8 @@
cmov%C2 %r3,%1,%0
cmov%D2 %r3,%5,%0
cmov%c2 %r4,%1,%0
- cmov%d2 %r4,%5,%0")
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
@@ -1336,7 +2289,8 @@
cmov%C2 %r3,%1,%0
cmov%D2 %r3,%5,%0
cmov%c2 %r4,%1,%0
- cmov%d2 %r4,%5,%0")
+ cmov%d2 %r4,%5,%0"
+ [(set_attr "type" "icmov")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
@@ -1350,7 +2304,8 @@
""
"@
cmovlbc %r2,%1,%0
- cmovlbs %r2,%3,%0")
+ cmovlbs %r2,%3,%0"
+ [(set_attr "type" "icmov")])
(define_insn ""
[(set (match_operand:DI 0 "register_operand" "=r,r")
@@ -1364,7 +2319,8 @@
""
"@
cmovlbs %r2,%1,%0
- cmovlbc %r2,%3,%0")
+ cmovlbc %r2,%3,%0"
+ [(set_attr "type" "icmov")])
;; This form is added since combine thinks that an IF_THEN_ELSE with both
;; arms constant is a single insn, so it won't try to form it if combine
@@ -1382,7 +2338,9 @@
(match_dup 0)))
(clobber (match_scratch:DI 4 "=&r"))]
""
- "addq %0,%1,%4\;cmov%C2 %r3,%4,%0")
+ "addq %0,%1,%4\;cmov%C2 %r3,%4,%0"
+ [(set_attr "type" "icmov")
+ (set_attr "length" "8")])
(define_split
[(set (match_operand:DI 0 "register_operand" "")
@@ -1500,6 +2458,70 @@
(match_dup 0) (match_dup 1)))]
"")
+(define_insn "sminqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (smin:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minsb8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "uminqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (umin:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minub8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (smax:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxsb8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (umax:SI (match_operand:QI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:QI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxub8 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "sminhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (smin:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minsw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "uminhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (umin:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "minuw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "smaxhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (smax:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxsw4 %r1,%2,%0"
+ [(set_attr "type" "mvi")])
+
+(define_insn "umaxhi3"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (umax:SI (match_operand:HI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:HI 2 "reg_or_8bit_operand" "rI")))]
+ "TARGET_MAX"
+ "maxuw4 %r1,%2,%0"
+ [(set_attr "type" "shift")])
+
(define_expand "smaxdi3"
[(set (match_dup 3)
(le:DI (match_operand:DI 1 "reg_or_0_operand" "")
@@ -1528,7 +2550,8 @@
(smax:DI (match_operand:DI 1 "register_operand" "0")
(const_int 0)))]
""
- "cmovlt %0,0,%0")
+ "cmovlt %0,0,%0"
+ [(set_attr "type" "icmov")])
(define_expand "smindi3"
[(set (match_dup 3)
@@ -1558,7 +2581,8 @@
(smin:DI (match_operand:DI 1 "register_operand" "0")
(const_int 0)))]
""
- "cmovgt %0,0,%0")
+ "cmovgt %0,0,%0"
+ [(set_attr "type" "icmov")])
(define_expand "umaxdi3"
[(set (match_dup 3)
@@ -1621,6 +2645,18 @@
(define_insn ""
[(set (pc)
(if_then_else
+ (match_operator 1 "signed_comparison_operator"
+ [(const_int 0)
+ (match_operand:DI 2 "register_operand" "r")])
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "b%c1 %2,%0"
+ [(set_attr "type" "ibr")])
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
(ne (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
(const_int 1)
(const_int 0))
@@ -1674,13 +2710,35 @@
;; to DFmode.
(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(match_operand:DF 2 "reg_or_fp0_operand" "fG")
+ (match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(match_operator:DF 1 "alpha_comparison_operator"
[(match_operand:DF 2 "reg_or_fp0_operand" "fG")
(match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
- "TARGET_FP"
- "cmpt%C1 %R2,%R3,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))
+ (match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1688,9 +2746,21 @@
[(float_extend:DF
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))
(match_operand:DF 3 "reg_or_fp0_operand" "fG")]))]
- "TARGET_FP"
- "cmpt%C1 %R2,%R3,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(match_operand:DF 2 "reg_or_fp0_operand" "fG")
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1698,9 +2768,22 @@
[(match_operand:DF 2 "reg_or_fp0_operand" "fG")
(float_extend:DF
(match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
- "TARGET_FP"
- "cmpt%C1 %R2,%R3,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=&f")
+ (match_operator:DF 1 "alpha_comparison_operator"
+ [(float_extend:DF
+ (match_operand:SF 2 "reg_or_fp0_operand" "fG"))
+ (float_extend:DF
+ (match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
+ "TARGET_FP && alpha_tp == ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
@@ -1709,9 +2792,10 @@
(match_operand:SF 2 "reg_or_fp0_operand" "fG"))
(float_extend:DF
(match_operand:SF 3 "reg_or_fp0_operand" "fG"))]))]
- "TARGET_FP"
- "cmpt%C1 %R2,%R3,%0"
- [(set_attr "type" "fpop")])
+ "TARGET_FP && alpha_tp != ALPHA_TP_INSN"
+ "cmp%-%C1%' %R2,%R3,%0"
+ [(set_attr "type" "fadd")
+ (set_attr "trap" "yes")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
@@ -1725,7 +2809,7 @@
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f,f")
@@ -1739,21 +2823,21 @@
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
(if_then_else:DF
(match_operator 3 "signed_comparison_operator"
- [(match_operand:DF 1 "reg_or_fp0_operand" "fG,fG")
+ [(match_operand:DF 4 "reg_or_fp0_operand" "fG,fG")
(match_operand:DF 2 "fp0_operand" "G,G")])
- (float_extend:DF (match_operand:SF 4 "reg_or_fp0_operand" "fG,0"))
+ (float_extend:DF (match_operand:SF 1 "reg_or_fp0_operand" "fG,0"))
(match_operand:DF 5 "reg_or_fp0_operand" "0,fG")))]
"TARGET_FP"
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
@@ -1768,7 +2852,7 @@
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f,f")
@@ -1783,7 +2867,7 @@
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
@@ -1798,7 +2882,7 @@
"@
fcmov%C3 %R4,%R1,%0
fcmov%D3 %R4,%R5,%0"
- [(set_attr "type" "fpop")])
+ [(set_attr "type" "fcmov")])
(define_expand "maxdf3"
[(set (match_dup 3)
@@ -1905,198 +2989,84 @@
}")
(define_expand "beq"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- enum machine_mode mode;
- enum rtx_code compare_code, branch_code;
-
- if (alpha_compare_fp_p)
- mode = DFmode, compare_code = EQ, branch_code = NE;
- else
- {
- mode = DImode, compare_code = MINUS, branch_code = EQ;
- if (GET_CODE (alpha_compare_op1) == CONST_INT)
- {
- compare_code = PLUS;
- alpha_compare_op1 = GEN_INT (- INTVAL (alpha_compare_op1));
- }
- }
-
- operands[1] = gen_reg_rtx (mode);
- operands[2] = gen_rtx (compare_code, mode,
- alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (branch_code, VOIDmode,
- operands[1], CONST0_RTX (mode));
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (EQ); }")
(define_expand "bne"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- enum machine_mode mode;
- enum rtx_code compare_code, branch_code;
-
- if (alpha_compare_fp_p)
- mode = DFmode, compare_code = EQ, branch_code = EQ;
- else
- {
- mode = DImode, compare_code = MINUS, branch_code = NE;
- if (GET_CODE (alpha_compare_op1) == CONST_INT)
- {
- compare_code = PLUS;
- alpha_compare_op1 = GEN_INT (- INTVAL (alpha_compare_op1));
- }
- }
-
- operands[1] = gen_reg_rtx (mode);
- operands[2] = gen_rtx (compare_code, mode,
- alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (branch_code, VOIDmode,
- operands[1], CONST0_RTX (mode));
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (NE); }")
(define_expand "blt"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- enum machine_mode mode = alpha_compare_fp_p ? DFmode : DImode;
- operands[1] = gen_reg_rtx (mode);
- operands[2] = gen_rtx (LT, mode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (mode));
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (LT); }")
(define_expand "ble"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- enum machine_mode mode = alpha_compare_fp_p ? DFmode : DImode;
- operands[1] = gen_reg_rtx (mode);
- operands[2] = gen_rtx (LE, mode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (mode));
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (LE); }")
(define_expand "bgt"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- if (alpha_compare_fp_p)
- {
- operands[1] = gen_reg_rtx (DFmode);
- operands[2] = gen_rtx (LT, DFmode, alpha_compare_op1, alpha_compare_op0);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (DFmode));
- }
- else
- {
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LE, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
- }
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (GT); }")
(define_expand "bge"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- if (alpha_compare_fp_p)
- {
- operands[1] = gen_reg_rtx (DFmode);
- operands[2] = gen_rtx (LE, DFmode, alpha_compare_op1, alpha_compare_op0);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], CONST0_RTX (DFmode));
- }
- else
- {
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LT, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
- }
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (GE); }")
(define_expand "bltu"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], const0_rtx);
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (LTU); }")
(define_expand "bleu"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (NE, VOIDmode, operands[1], const0_rtx);
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (LEU); }")
(define_expand "bgtu"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (GTU); }")
(define_expand "bgeu"
- [(set (match_dup 1) (match_dup 2))
- (set (pc)
- (if_then_else (match_dup 3)
+ [(set (pc)
+ (if_then_else (match_dup 1)
(label_ref (match_operand 0 "" ""))
(pc)))]
""
- "
-{
- operands[1] = gen_reg_rtx (DImode);
- operands[2] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
- operands[3] = gen_rtx (EQ, VOIDmode, operands[1], const0_rtx);
-}")
+ "{ operands[1] = alpha_emit_conditional_branch (GEU); }")
(define_expand "seq"
[(set (match_operand:DI 0 "register_operand" "")
@@ -2107,7 +3077,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (EQ, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_EQ (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "sne"
@@ -2120,7 +3090,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (EQ, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_EQ (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "slt"
@@ -2132,7 +3102,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LT, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_LT (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "sle"
@@ -2144,7 +3114,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LE, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_LE (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "sgt"
@@ -2156,8 +3126,8 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LT, DImode, force_reg (DImode, alpha_compare_op1),
- alpha_compare_op0);
+ operands[1] = gen_rtx_LT (DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
}")
(define_expand "sge"
@@ -2169,8 +3139,8 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LE, DImode, force_reg (DImode, alpha_compare_op1),
- alpha_compare_op0);
+ operands[1] = gen_rtx_LE (DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
}")
(define_expand "sltu"
@@ -2182,7 +3152,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LTU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_LTU (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "sleu"
@@ -2194,7 +3164,7 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LEU, DImode, alpha_compare_op0, alpha_compare_op1);
+ operands[1] = gen_rtx_LEU (DImode, alpha_compare_op0, alpha_compare_op1);
}")
(define_expand "sgtu"
@@ -2206,8 +3176,8 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LTU, DImode, force_reg (DImode, alpha_compare_op1),
- alpha_compare_op0);
+ operands[1] = gen_rtx_LTU (DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
}")
(define_expand "sgeu"
@@ -2219,184 +3189,58 @@
if (alpha_compare_fp_p)
FAIL;
- operands[1] = gen_rtx (LEU, DImode, force_reg (DImode, alpha_compare_op1),
- alpha_compare_op0);
+ operands[1] = gen_rtx_LEU (DImode, force_reg (DImode, alpha_compare_op1),
+ alpha_compare_op0);
}")
;; These are the main define_expand's used to make conditional moves.
(define_expand "movsicc"
- [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
- (set (match_operand:SI 0 "register_operand" "")
- (if_then_else:DI (match_dup 5)
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
(match_operand:SI 2 "reg_or_8bit_operand" "")
(match_operand:SI 3 "reg_or_8bit_operand" "")))]
""
"
{
- rtx op0,op1;
- enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
-
- if (alpha_compare_fp_p)
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], SImode)) == 0)
FAIL;
- switch (code)
- {
- case EQ: case LE: case LT:
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case NE:
- code = code2 = EQ;
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case GE:
- code = LE;
- op0 = force_reg (DImode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- case GT:
- code = LT;
- op0 = force_reg (DImode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- default:
- FAIL;
- }
- operands[1] = gen_rtx (code, DImode, op0, op1);
- operands[4] = gen_reg_rtx (DImode);
- operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DImode));
}")
(define_expand "movdicc"
- [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
- (set (match_operand:DI 0 "register_operand" "")
- (if_then_else:DI (match_dup 5)
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
(match_operand:DI 2 "reg_or_8bit_operand" "")
(match_operand:DI 3 "reg_or_8bit_operand" "")))]
""
"
{
- rtx op0,op1;
- enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
-
- if (alpha_compare_fp_p)
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], DImode)) == 0)
FAIL;
- switch (code)
- {
- case EQ: case LE: case LT:
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case NE:
- code = code2 = EQ;
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case GE:
- code = LE;
- op0 = force_reg (DImode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- case GT:
- code = LT;
- op0 = force_reg (DImode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- default:
- FAIL;
- }
- operands[1] = gen_rtx (code, DImode, op0, op1);
- operands[4] = gen_reg_rtx (DImode);
- operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DImode));
}")
(define_expand "movsfcc"
- [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
- (set (match_operand:SF 0 "register_operand" "")
- (if_then_else:SF (match_dup 5)
- (match_operand:SF 2 "reg_or_fp0_operand" "")
- (match_operand:SF 3 "reg_or_fp0_operand" "")))]
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "reg_or_8bit_operand" "")
+ (match_operand:SF 3 "reg_or_8bit_operand" "")))]
""
"
{
- rtx op0,op1;
- enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
-
- if (!alpha_compare_fp_p)
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], SFmode)) == 0)
FAIL;
- switch (code)
- {
- case EQ: case LE: case LT:
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case NE:
- /* There isn't a cmptne insn. */
- code = code2 = EQ;
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case GE:
- code = LE;
- op0 = force_reg (DFmode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- case GT:
- code = LT;
- op0 = force_reg (DFmode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- default:
- FAIL;
- }
- operands[1] = gen_rtx (code, DFmode, op0, op1);
- operands[4] = gen_reg_rtx (DFmode);
- operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DFmode));
}")
(define_expand "movdfcc"
- [(set (match_dup 4) (match_operand 1 "comparison_operator" ""))
- (set (match_operand:DF 0 "register_operand" "")
- (if_then_else:DF (match_dup 5)
- (match_operand:DF 2 "reg_or_fp0_operand" "")
- (match_operand:DF 3 "reg_or_fp0_operand" "")))]
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "reg_or_8bit_operand" "")
+ (match_operand:DF 3 "reg_or_8bit_operand" "")))]
""
"
{
- rtx op0,op1;
- enum rtx_code code = GET_CODE (operands[1]), code2 = NE;
-
- if (!alpha_compare_fp_p)
+ if ((operands[1] = alpha_emit_conditional_move (operands[1], DFmode)) == 0)
FAIL;
- switch (code)
- {
- case EQ: case LE: case LT:
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case NE:
- /* There isn't a cmptne insn. */
- code = code2 = EQ;
- op0 = alpha_compare_op0;
- op1 = alpha_compare_op1;
- break;
- case GE:
- code = LE;
- op0 = force_reg (DFmode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- case GT:
- code = LT;
- op0 = force_reg (DFmode, alpha_compare_op1);
- op1 = alpha_compare_op0;
- break;
- default:
- FAIL;
- }
- operands[1] = gen_rtx (code, DFmode, op0, op1);
- operands[4] = gen_reg_rtx (DFmode);
- operands[5] = gen_rtx (code2, VOIDmode, operands[4], CONST0_RTX (DFmode));
}")
;; These define_split definitions are used in cases when comparisons have
@@ -2451,25 +3295,25 @@
&& extended_count (operands[3], DImode, unsignedp) >= 1))
{
if (GET_CODE (operands[3]) == CONST_INT)
- operands[7] = gen_rtx (PLUS, DImode, operands[2],
- GEN_INT (- INTVAL (operands[3])));
+ operands[7] = gen_rtx_PLUS (DImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
else
- operands[7] = gen_rtx (MINUS, DImode, operands[2], operands[3]);
+ operands[7] = gen_rtx_MINUS (DImode, operands[2], operands[3]);
- operands[8] = gen_rtx (code, VOIDmode, operands[6], const0_rtx);
+ operands[8] = gen_rtx_fmt_ee (code, VOIDmode, operands[6], const0_rtx);
}
else if (code == EQ || code == LE || code == LT
|| code == LEU || code == LTU)
{
- operands[7] = gen_rtx (code, DImode, operands[2], operands[3]);
- operands[8] = gen_rtx (NE, VOIDmode, operands[6], const0_rtx);
+ operands[7] = gen_rtx_fmt_ee (code, DImode, operands[2], operands[3]);
+ operands[8] = gen_rtx_NE (VOIDmode, operands[6], const0_rtx);
}
else
{
- operands[7] = gen_rtx (reverse_condition (code), DImode, operands[2],
- operands[3]);
- operands[8] = gen_rtx (EQ, VOIDmode, operands[6], const0_rtx);
+ operands[7] = gen_rtx_fmt_ee (reverse_condition (code), DImode,
+ operands[2], operands[3]);
+ operands[8] = gen_rtx_EQ (VOIDmode, operands[6], const0_rtx);
}
}")
@@ -2498,14 +3342,14 @@
FAIL;
if (GET_CODE (operands[3]) == CONST_INT)
- tem = gen_rtx (PLUS, SImode, operands[2],
- GEN_INT (- INTVAL (operands[3])));
+ tem = gen_rtx_PLUS (SImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
else
- tem = gen_rtx (MINUS, SImode, operands[2], operands[3]);
+ tem = gen_rtx_MINUS (SImode, operands[2], operands[3]);
- operands[7] = gen_rtx (SIGN_EXTEND, DImode, tem);
- operands[8] = gen_rtx (GET_CODE (operands[1]), VOIDmode, operands[6],
- const0_rtx);
+ operands[7] = gen_rtx_SIGN_EXTEND (DImode, tem);
+ operands[8] = gen_rtx_fmt_ee (GET_CODE (operands[1]), VOIDmode,
+ operands[6], const0_rtx);
}")
(define_split
@@ -2529,25 +3373,25 @@
&& extended_count (operands[3], DImode, unsignedp) >= 1))
{
if (GET_CODE (operands[3]) == CONST_INT)
- operands[5] = gen_rtx (PLUS, DImode, operands[2],
- GEN_INT (- INTVAL (operands[3])));
+ operands[5] = gen_rtx_PLUS (DImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
else
- operands[5] = gen_rtx (MINUS, DImode, operands[2], operands[3]);
+ operands[5] = gen_rtx_MINUS (DImode, operands[2], operands[3]);
- operands[6] = gen_rtx (code, VOIDmode, operands[4], const0_rtx);
+ operands[6] = gen_rtx_fmt_ee (code, VOIDmode, operands[4], const0_rtx);
}
else if (code == EQ || code == LE || code == LT
|| code == LEU || code == LTU)
{
- operands[5] = gen_rtx (code, DImode, operands[2], operands[3]);
- operands[6] = gen_rtx (NE, VOIDmode, operands[4], const0_rtx);
+ operands[5] = gen_rtx_fmt_ee (code, DImode, operands[2], operands[3]);
+ operands[6] = gen_rtx_NE (VOIDmode, operands[4], const0_rtx);
}
else
{
- operands[5] = gen_rtx (reverse_condition (code), DImode, operands[2],
- operands[3]);
- operands[6] = gen_rtx (EQ, VOIDmode, operands[4], const0_rtx);
+ operands[5] = gen_rtx_fmt_ee (reverse_condition (code), DImode,
+ operands[2], operands[3]);
+ operands[6] = gen_rtx_EQ (VOIDmode, operands[4], const0_rtx);
}
}")
@@ -2568,14 +3412,14 @@
{ rtx tem;
if (GET_CODE (operands[3]) == CONST_INT)
- tem = gen_rtx (PLUS, SImode, operands[2],
- GEN_INT (- INTVAL (operands[3])));
+ tem = gen_rtx_PLUS (SImode, operands[2],
+ GEN_INT (- INTVAL (operands[3])));
else
- tem = gen_rtx (MINUS, SImode, operands[2], operands[3]);
+ tem = gen_rtx_MINUS (SImode, operands[2], operands[3]);
- operands[5] = gen_rtx (SIGN_EXTEND, DImode, tem);
- operands[6] = gen_rtx (GET_CODE (operands[1]), VOIDmode,
- operands[4], const0_rtx);
+ operands[5] = gen_rtx_SIGN_EXTEND (DImode, tem);
+ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[1]), VOIDmode,
+ operands[4], const0_rtx);
}")
;; We can convert such things as "a > 0xffff" to "t = a & ~ 0xffff; t != 0".
@@ -2597,10 +3441,10 @@
"
{
operands[5] = GEN_INT (~ INTVAL (operands[3]));
- operands[6] = gen_rtx (((GET_CODE (operands[1]) == GTU
- || GET_CODE (operands[1]) == GT)
- ? NE : EQ),
- DImode, operands[4], const0_rtx);
+ operands[6] = gen_rtx_fmt_ee (((GET_CODE (operands[1]) == GTU
+ || GET_CODE (operands[1]) == GT)
+ ? NE : EQ),
+ DImode, operands[4], const0_rtx);
}")
;; Here are the CALL and unconditional branch insns. Calls on NT and OSF
@@ -2608,11 +3452,15 @@
(define_expand "call"
[(use (match_operand:DI 0 "" ""))
- (use (match_operand 1 "" ""))]
+ (use (match_operand 1 "" ""))
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))]
""
"
-{ if (WINDOWS_NT)
+{ if (TARGET_WINDOWS_NT)
emit_call_insn (gen_call_nt (operands[0], operands[1]));
+ else if (TARGET_OPEN_VMS)
+ emit_call_insn (gen_call_vms (operands[0], operands[2]));
else
emit_call_insn (gen_call_osf (operands[0], operands[1]));
@@ -2634,39 +3482,95 @@
if (GET_CODE (operands[0]) != SYMBOL_REF
&& ! (GET_CODE (operands[0]) == REG && REGNO (operands[0]) == 27))
{
- rtx tem = gen_rtx (REG, DImode, 27);
+ rtx tem = gen_rtx_REG (DImode, 27);
emit_move_insn (tem, operands[0]);
operands[0] = tem;
}
}")
(define_expand "call_nt"
- [(parallel [(call (mem:DI (match_operand:DI 0 "" ""))
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
(match_operand 1 "" ""))
(clobber (reg:DI 26))])]
""
"
{ if (GET_CODE (operands[0]) != MEM)
abort ();
+
operands[0] = XEXP (operands[0], 0);
+ if (GET_CODE (operands[0]) != SYMBOL_REF && GET_CODE (operands[0]) != REG)
+ operands[0] = force_reg (DImode, operands[0]);
+}")
- if (GET_CODE (operands[1]) != SYMBOL_REF
- && ! (GET_CODE (operands[1]) == REG && REGNO (operands[1]) == 27))
+;;
+;; call openvms/alpha
+;; op 0: symbol ref for called function
+;; op 1: next_arg_reg (argument information value for R25)
+;;
+(define_expand "call_vms"
+ [(parallel [(call (mem:DI (match_operand 0 "" ""))
+ (match_operand 1 "" ""))
+ (use (match_dup 2))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ ""
+ "
+{ if (GET_CODE (operands[0]) != MEM)
+ abort ();
+
+ operands[0] = XEXP (operands[0], 0);
+
+ /* Always load AI with argument information, then handle symbolic and
+ indirect call differently. Load RA and set operands[2] to PV in
+ both cases. */
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[1]);
+ if (GET_CODE (operands[0]) == SYMBOL_REF)
{
- rtx tem = gen_rtx (REG, DImode, 27);
- emit_move_insn (tem, operands[1]);
- operands[1] = tem;
+ extern char *savealloc ();
+ char *linksym, *symbol = XSTR (operands[0], 0);
+ rtx linkage;
+
+ if (*symbol == '*')
+ symbol++;
+ linksym = savealloc (strlen (symbol) + 6);
+
+ alpha_need_linkage (symbol, 0);
+
+ linksym[0] = '$';
+ strcpy (linksym+1, symbol);
+ strcat (linksym, \"..lk\");
+ linkage = gen_rtx_SYMBOL_REF (Pmode, linksym);
+
+ emit_move_insn (gen_rtx_REG (Pmode, 26), gen_rtx_MEM (Pmode, linkage));
+
+ operands[2]
+ = validize_mem (gen_rtx_MEM (Pmode, plus_constant (linkage, 8)));
}
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, 26),
+ gen_rtx_MEM (Pmode, plus_constant (operands[0], 8)));
+
+ operands[2] = operands[0];
+ }
+
}")
(define_expand "call_value"
[(use (match_operand 0 "" ""))
(use (match_operand:DI 1 "" ""))
- (use (match_operand 2 "" ""))]
+ (use (match_operand 2 "" ""))
+ (use (match_operand 3 "" ""))
+ (use (match_operand 4 "" ""))]
""
"
-{ if (WINDOWS_NT)
+{ if (TARGET_WINDOWS_NT)
emit_call_insn (gen_call_value_nt (operands[0], operands[1], operands[2]));
+ else if (TARGET_OPEN_VMS)
+ emit_call_insn (gen_call_value_vms (operands[0], operands[1],
+ operands[3]));
else
emit_call_insn (gen_call_value_osf (operands[0], operands[1],
operands[2]));
@@ -2689,7 +3593,7 @@
if (GET_CODE (operands[1]) != SYMBOL_REF
&& ! (GET_CODE (operands[1]) == REG && REGNO (operands[1]) == 27))
{
- rtx tem = gen_rtx (REG, DImode, 27);
+ rtx tem = gen_rtx_REG (DImode, 27);
emit_move_insn (tem, operands[1]);
operands[1] = tem;
}
@@ -2697,7 +3601,7 @@
(define_expand "call_value_nt"
[(parallel [(set (match_operand 0 "" "")
- (call (mem:DI (match_operand:DI 1 "" ""))
+ (call (mem:DI (match_operand 1 "" ""))
(match_operand 2 "" "")))
(clobber (reg:DI 26))])]
""
@@ -2706,12 +3610,57 @@
abort ();
operands[1] = XEXP (operands[1], 0);
- if (GET_CODE (operands[1]) != SYMBOL_REF
- && ! (GET_CODE (operands[1]) == REG && REGNO (operands[1]) == 27))
+ if (GET_CODE (operands[1]) != SYMBOL_REF && GET_CODE (operands[1]) != REG)
+ operands[1] = force_reg (DImode, operands[1]);
+}")
+
+(define_expand "call_value_vms"
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:DI (match_operand:DI 1 "" ""))
+ (match_operand 2 "" "")))
+ (use (match_dup 3))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))])]
+ ""
+ "
+{ if (GET_CODE (operands[1]) != MEM)
+ abort ();
+
+ operands[1] = XEXP (operands[1], 0);
+
+ /* Always load AI with argument information, then handle symbolic and
+ indirect call differently. Load RA and set operands[3] to PV in
+ both cases. */
+
+ emit_move_insn (gen_rtx_REG (DImode, 25), operands[2]);
+ if (GET_CODE (operands[1]) == SYMBOL_REF)
{
- rtx tem = gen_rtx (REG, DImode, 27);
- emit_move_insn (tem, operands[1]);
- operands[1] = tem;
+ extern char *savealloc ();
+ char *linksym, *symbol = XSTR (operands[1], 0);
+ rtx linkage;
+
+ if (*symbol == '*')
+ symbol++;
+ linksym = savealloc (strlen (symbol) + 6);
+
+ alpha_need_linkage (symbol, 0);
+ linksym[0] = '$';
+ strcpy (linksym+1, symbol);
+ strcat (linksym, \"..lk\");
+ linkage = gen_rtx_SYMBOL_REF (Pmode, linksym);
+
+ emit_move_insn (gen_rtx_REG (Pmode, 26), gen_rtx_MEM (Pmode, linkage));
+
+ operands[3]
+ = validize_mem (gen_rtx_MEM (Pmode, plus_constant (linkage, 8)));
+ }
+ else
+ {
+ emit_move_insn (gen_rtx_REG (Pmode, 26),
+ gen_rtx_MEM (Pmode, plus_constant (operands[1], 8)));
+
+ operands[3] = operands[1];
}
}")
@@ -2720,46 +3669,81 @@
(match_operand 1 "" ""))
(clobber (reg:DI 27))
(clobber (reg:DI 26))]
- "! WINDOWS_NT"
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS"
"@
jsr $26,($27),0\;ldgp $29,0($26)
- bsr $26,%0..ng
+ bsr $26,$%0..ng
jsr $26,%0\;ldgp $29,0($26)"
- [(set_attr "type" "jsr,jsr,ibr")])
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,12")])
(define_insn ""
- [(call (mem:DI (match_operand:DI 0 "call_operand" "r,i"))
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,R,i"))
(match_operand 1 "" ""))
(clobber (reg:DI 26))]
- "WINDOWS_NT"
+ "TARGET_WINDOWS_NT"
"@
jsr $26,(%0)
- bsr $26,%0"
- [(set_attr "type" "jsr")])
+ bsr $26,%0
+ jsr $26,%0"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,12")])
(define_insn ""
+ [(call (mem:DI (match_operand:DI 0 "call_operand" "r,i"))
+ (match_operand 1 "" ""))
+ (use (match_operand:DI 2 "general_operand" "r,m"))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_OPEN_VMS"
+ "@
+ bis %2,%2,$27\;jsr $26,0\;ldq $27,0($29)
+ ldq $27,%2\;jsr $26,%0\;ldq $27,0($29)"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,16")])
+
+(define_insn ""
[(set (match_operand 0 "register_operand" "=rf,rf,rf")
(call (mem:DI (match_operand:DI 1 "call_operand" "r,R,i"))
(match_operand 2 "" "")))
(clobber (reg:DI 27))
(clobber (reg:DI 26))]
- "! WINDOWS_NT"
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS"
"@
jsr $26,($27),0\;ldgp $29,0($26)
- bsr $26,%1..ng
+ bsr $26,$%1..ng
jsr $26,%1\;ldgp $29,0($26)"
- [(set_attr "type" "jsr,jsr,ibr")])
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,*,12")])
(define_insn ""
- [(set (match_operand 0 "register_operand" "=rf,rf")
- (call (mem:DI (match_operand:DI 1 "call_operand" "r,i"))
+ [(set (match_operand 0 "register_operand" "=rf,rf,rf")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,R,i"))
(match_operand 2 "" "")))
(clobber (reg:DI 26))]
- "WINDOWS_NT"
+ "TARGET_WINDOWS_NT"
"@
jsr $26,(%1)
- bsr $26,%1"
- [(set_attr "type" "jsr")])
+ bsr $26,%1
+ jsr $26,%1"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "*,*,12")])
+
+(define_insn ""
+ [(set (match_operand 0 "register_operand" "")
+ (call (mem:DI (match_operand:DI 1 "call_operand" "r,i"))
+ (match_operand 2 "" "")))
+ (use (match_operand:DI 3 "general_operand" "r,m"))
+ (use (reg:DI 25))
+ (use (reg:DI 26))
+ (clobber (reg:DI 27))]
+ "TARGET_OPEN_VMS"
+ "@
+ bis %3,%3,$27\;jsr $26,0\;ldq $27,0($29)
+ ldq $27,%3\;jsr $26,%1\;ldq $27,0($29)"
+ [(set_attr "type" "jsr")
+ (set_attr "length" "12,16")])
;; Call subroutine returning any type.
@@ -2796,7 +3780,8 @@
(define_insn "blockage"
[(unspec_volatile [(const_int 0)] 1)]
""
- "")
+ ""
+ [(set_attr "length" "0")])
(define_insn "jump"
[(set (pc)
@@ -2811,6 +3796,15 @@
"ret $31,($26),1"
[(set_attr "type" "ibr")])
+;; Use a different pattern for functions which have non-trivial
+;; epilogues so as not to confuse jump and reorg.
+(define_insn "return_internal"
+ [(use (reg:DI 26))
+ (return)]
+ ""
+ "ret $31,($26),1"
+ [(set_attr "type" "ibr")])
+
(define_insn "indirect_jump"
[(set (pc) (match_operand:DI 0 "register_operand" "r"))]
""
@@ -2820,8 +3814,8 @@
(define_insn "nop"
[(const_int 0)]
""
- "bis $31,$31,$31"
- [(set_attr "type" "iaddlog")])
+ "nop"
+ [(set_attr "type" "ilog")])
(define_expand "tablejump"
[(use (match_operand:SI 0 "register_operand" ""))
@@ -2829,8 +3823,10 @@
""
"
{
- if (WINDOWS_NT)
+ if (TARGET_WINDOWS_NT)
emit_jump_insn (gen_tablejump_nt (operands[0], operands[1]));
+ else if (TARGET_OPEN_VMS)
+ emit_jump_insn (gen_tablejump_vms (operands[0], operands[1]));
else
emit_jump_insn (gen_tablejump_osf (operands[0], operands[1]));
@@ -2858,12 +3854,27 @@
"
{ operands[3] = gen_reg_rtx (DImode); }")
+;;
+;; tablejump, openVMS way
+;; op 0: offset
+;; op 1: label preceding jump-table
+;;
+(define_expand "tablejump_vms"
+ [(set (match_dup 2)
+ (match_operand:DI 0 "register_operand" ""))
+ (set (pc)
+ (plus:DI (match_dup 2)
+ (label_ref:DI (match_operand 1 "" ""))))]
+ ""
+ "
+{ operands[2] = gen_reg_rtx (DImode); }")
+
(define_insn ""
[(set (pc)
(plus:DI (match_operand:DI 0 "register_operand" "r")
(label_ref:DI (match_operand 1 "" ""))))
(clobber (match_scratch:DI 2 "=r"))]
- "! WINDOWS_NT && next_active_insn (insn) != 0
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && next_active_insn (insn) != 0
&& GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC
&& PREV_INSN (next_active_insn (insn)) == operands[1]"
"*
@@ -2900,13 +3911,14 @@
else
return \"addq %0,$29,%2\;jmp $31,(%2),0\";
}"
- [(set_attr "type" "ibr")])
+ [(set_attr "type" "ibr")
+ (set_attr "length" "8")])
(define_insn ""
[(set (pc)
(match_operand:DI 0 "register_operand" "r"))
(use (label_ref (match_operand 1 "" "")))]
- "WINDOWS_NT && next_active_insn (insn) != 0
+ "TARGET_WINDOWS_NT && next_active_insn (insn) != 0
&& GET_CODE (PATTERN (next_active_insn (insn))) == ADDR_DIFF_VEC
&& PREV_INSN (next_active_insn (insn)) == operands[1]"
"*
@@ -2945,12 +3957,30 @@
}"
[(set_attr "type" "ibr")])
+;;
+;; op 0 is table offset
+;; op 1 is table label
+;;
+
+(define_insn ""
+ [(set (pc)
+ (plus:DI (match_operand 0 "register_operand" "r")
+ (label_ref (match_operand 1 "" ""))))]
+ "TARGET_OPEN_VMS"
+ "jmp $31,(%0),0"
+ [(set_attr "type" "ibr")])
+
;; Cache flush. Used by INITIALIZE_TRAMPOLINE. 0x86 is PAL_imb, but we don't
;; want to have to include pal.h in our .s file.
-(define_insn ""
+;;
+;; Technically the type for call_pal is jsr, but we use that for determining
+;; if we need a GP. Use ibr instead since it has the same EV5 scheduling
+;; characteristics.
+(define_insn "imb"
[(unspec_volatile [(const_int 0)] 0)]
""
- "call_pal 0x86")
+ "call_pal 0x86"
+ [(set_attr "type" "ibr")])
;; Finally, we have the basic data motion insns. The byte and word insns
;; are done via define_expand. Start with the floating-point insns, since
@@ -2959,32 +3989,70 @@
(define_insn ""
[(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
(match_operand:SF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
- "register_operand (operands[0], SFmode)
- || reg_or_fp0_operand (operands[1], SFmode)"
+ "! TARGET_CIX
+ && (register_operand (operands[0], SFmode)
+ || reg_or_fp0_operand (operands[1], SFmode))"
+ "@
+ bis %r1,%r1,%0
+ ldl %0,%1
+ stl %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ld%, %0,%1
+ st%, %R1,%0"
+ [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst")])
+
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m,f,*r")
+ (match_operand:SF 1 "input_operand" "rG,m,rG,f,G,m,fG,r,*f"))]
+ "TARGET_CIX
+ && (register_operand (operands[0], SFmode)
+ || reg_or_fp0_operand (operands[1], SFmode))"
"@
bis %r1,%r1,%0
ldl %0,%1
stl %r1,%0
cpys %1,%1,%0
cpys $f31,$f31,%0
- lds %0,%1
- sts %R1,%0"
- [(set_attr "type" "iaddlog,ld,st,fpop,fpop,ld,st")])
+ ld%, %0,%1
+ st%, %R1,%0
+ itofs %1,%0
+ ftois %1,%0"
+ [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst,itof,ftoi")])
(define_insn ""
[(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m")
(match_operand:DF 1 "input_operand" "rG,m,rG,f,G,m,fG"))]
- "register_operand (operands[0], DFmode)
- || reg_or_fp0_operand (operands[1], DFmode)"
+ "! TARGET_CIX
+ && (register_operand (operands[0], DFmode)
+ || reg_or_fp0_operand (operands[1], DFmode))"
"@
bis %r1,%r1,%0
ldq %0,%1
stq %r1,%0
cpys %1,%1,%0
cpys $f31,$f31,%0
- ldt %0,%1
- stt %R1,%0"
- [(set_attr "type" "iaddlog,ld,st,fpop,fpop,ld,st")])
+ ld%- %0,%1
+ st%- %R1,%0"
+ [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst")])
+
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m,f,f,f,m,f,*r")
+ (match_operand:DF 1 "input_operand" "rG,m,rG,f,G,m,fG,r,*f"))]
+ "TARGET_CIX
+ && (register_operand (operands[0], DFmode)
+ || reg_or_fp0_operand (operands[1], DFmode))"
+ "@
+ bis %r1,%r1,%0
+ ldq %0,%1
+ stq %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ld%- %0,%1
+ st%- %R1,%0
+ itoft %1,%0
+ ftoit %1,%0"
+ [(set_attr "type" "ilog,ild,ist,fcpys,fcpys,fld,fst,itof,ftoi")])
(define_expand "movsf"
[(set (match_operand:SF 0 "nonimmediate_operand" "")
@@ -3011,8 +4079,9 @@
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,f,f,f,m")
(match_operand:SI 1 "input_operand" "r,J,I,K,L,m,rJ,f,J,m,fG"))]
- "! WINDOWS_NT && (register_operand (operands[0], SImode)
- || reg_or_0_operand (operands[1], SImode))"
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && ! TARGET_CIX
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
"@
bis %1,%1,%0
bis $31,$31,%0
@@ -3023,15 +4092,38 @@
stl %r1,%0
cpys %1,%1,%0
cpys $f31,$f31,%0
- lds %0,%1
- sts %R1,%0"
- [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ld,st,fpop,fpop,ld,st")])
+ ld%, %0,%1
+ st%, %R1,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ild,ist,fcpys,fcpys,fld,fst")])
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,m,f,f,f,m,r,*f")
+ (match_operand:SI 1 "input_operand" "r,J,I,K,L,m,rJ,f,J,m,fG,f,*r"))]
+ "! TARGET_WINDOWS_NT && ! TARGET_OPEN_VMS && TARGET_CIX
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ ldl %0,%1
+ stl %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ld%, %0,%1
+ st%, %R1,%0
+ ftois %1,%0
+ itofs %1,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ild,ist,fcpys,fcpys,fld,fst,ftoi,itof")])
(define_insn ""
[(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,r,r,m,f,f,f,m")
(match_operand:SI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,m,fG"))]
- "WINDOWS_NT && (register_operand (operands[0], SImode)
- || reg_or_0_operand (operands[1], SImode))"
+ "(TARGET_WINDOWS_NT || TARGET_OPEN_VMS)
+ && (register_operand (operands[0], SImode)
+ || reg_or_0_operand (operands[1], SImode))"
"@
bis %1,%1,%0
bis $31,$31,%0
@@ -3043,15 +4135,16 @@
stl %r1,%0
cpys %1,%1,%0
cpys $f31,$f31,%0
- lds %0,%1
- sts %R1,%0"
- [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ldsym,ld,st,fpop,fpop,ld,st")])
+ ld%, %0,%1
+ st%, %R1,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst")])
(define_insn ""
[(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
(match_operand:HI 1 "input_operand" "r,J,I,n,f,J"))]
- "register_operand (operands[0], HImode)
- || register_operand (operands[1], HImode)"
+ "! TARGET_BWX
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
"@
bis %1,%1,%0
bis $31,$31,%0
@@ -3059,21 +4152,56 @@
lda %0,%L1
cpys %1,%1,%0
cpys $f31,$f31,%0"
- [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,fpop,fpop")])
+ [(set_attr "type" "ilog,ilog,ilog,iadd,fcpys,fcpys")])
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,r,m,f,f")
+ (match_operand:HI 1 "input_operand" "r,J,I,n,m,rJ,f,J"))]
+ "TARGET_BWX
+ && (register_operand (operands[0], HImode)
+ || reg_or_0_operand (operands[1], HImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%L1
+ ldwu %0,%1
+ stw %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,ild,ist,fcpys,fcpys")])
(define_insn ""
[(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,r,f,f")
(match_operand:QI 1 "input_operand" "r,J,I,n,f,J"))]
- "register_operand (operands[0], QImode)
- || register_operand (operands[1], QImode)"
+ "! TARGET_BWX
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%L1
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,fcpys,fcpys")])
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,r,r,m,f,f")
+ (match_operand:QI 1 "input_operand" "r,J,I,n,m,rJ,f,J"))]
+ "TARGET_BWX
+ && (register_operand (operands[0], QImode)
+ || reg_or_0_operand (operands[1], QImode))"
"@
bis %1,%1,%0
bis $31,$31,%0
bis $31,%1,%0
lda %0,%L1
+ ldbu %0,%1
+ stb %r1,%0
cpys %1,%1,%0
cpys $f31,$f31,%0"
- [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,fpop,fpop")])
+ [(set_attr "type" "ilog,ilog,ilog,iadd,ild,ist,fcpys,fcpys")])
;; We do two major things here: handle mem->mem and construct long
;; constants.
@@ -3121,8 +4249,9 @@
(define_insn ""
[(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,r,r,m,f,f,f,Q")
(match_operand:DI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,Q,fG"))]
- "register_operand (operands[0], DImode)
- || reg_or_0_operand (operands[1], DImode)"
+ "! TARGET_CIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
"@
bis %1,%1,%0
bis $31,$31,%0
@@ -3136,7 +4265,30 @@
cpys $f31,$f31,%0
ldt %0,%1
stt %R1,%0"
- [(set_attr "type" "iaddlog,iaddlog,iaddlog,iaddlog,iaddlog,ldsym,ld,st,fpop,fpop,ld,st")])
+ [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=r,r,r,r,r,r,r,m,f,f,f,Q,r,*f")
+ (match_operand:DI 1 "input_operand" "r,J,I,K,L,s,m,rJ,f,J,Q,fG,f,*r"))]
+ "TARGET_CIX
+ && (register_operand (operands[0], DImode)
+ || reg_or_0_operand (operands[1], DImode))"
+ "@
+ bis %1,%1,%0
+ bis $31,$31,%0
+ bis $31,%1,%0
+ lda %0,%1
+ ldah %0,%h1
+ lda %0,%1
+ ldq%A1 %0,%1
+ stq%A0 %r1,%0
+ cpys %1,%1,%0
+ cpys $f31,$f31,%0
+ ldt %0,%1
+ stt %R1,%0
+ ftoit %1,%0
+ itoft %1,%0"
+ [(set_attr "type" "ilog,ilog,ilog,iadd,iadd,ldsym,ild,ist,fcpys,fcpys,fld,fst,ftoi,itof")])
;; We do three major things here: handle mem->mem, put 64-bit constants in
;; memory, and construct long 32-bit constants.
@@ -3166,15 +4318,39 @@
}
else if (CONSTANT_P (operands[1]))
{
- operands[1] = force_const_mem (DImode, operands[1]);
- if (reload_in_progress)
+ if (TARGET_BUILD_CONSTANTS)
{
- emit_move_insn (operands[0], XEXP (operands[1], 0));
- operands[1] = copy_rtx (operands[1]);
- XEXP (operands[1], 0) = operands[0];
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT i;
+
+ if (GET_CODE (operands[1]) == CONST_INT)
+ i = INTVAL (operands[1]);
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ i = CONST_DOUBLE_LOW (operands[1]);
+ else
+ abort();
+
+ tem = alpha_emit_set_long_const (operands[0], i);
+ if (rtx_equal_p (tem, operands[0]))
+ DONE;
+ else
+ operands[1] = tem;
+#else
+ abort();
+#endif
}
else
- operands[1] = validize_mem (operands[1]);
+ {
+ operands[1] = force_const_mem (DImode, operands[1]);
+ if (reload_in_progress)
+ {
+ emit_move_insn (operands[0], XEXP (operands[1], 0));
+ operands[1] = copy_rtx (operands[1]);
+ XEXP (operands[1], 0) = operands[0];
+ }
+ else
+ operands[1] = validize_mem (operands[1]);
+ }
}
else
abort ();
@@ -3230,16 +4406,10 @@
""
"")
-;; Similar for unaligned loads. For QImode, we use the sequence from the
-;; Alpha Architecture manual. However, for HImode, we do not. HImode pointers
-;; are normally aligned to the byte boundary, so an HImode object cannot
-;; cross a longword boundary. We could use a sequence similar to that for
-;; QImode, but that would fail if the pointer, was, in fact, not aligned.
-;; Instead, we clear bit 1 in the address and do an ldl. If the low-order
-;; bit was not aligned, this will trap and the trap handler will do what is
-;; needed.
+;; Similar for unaligned loads, where we use the sequence from the
+;; Alpha Architecture manual.
;;
-;; Here operand 1 is the address. Operands 2 and 3 are temporaries, where
+;; Operand 1 is the address. Operands 2 and 3 are temporaries, where
;; operand 3 can overlap the input and output registers.
(define_expand "unaligned_loadqi"
@@ -3255,26 +4425,19 @@
""
"")
-;; For this, the address must already be in a register. We also need two
-;; DImode temporaries, neither of which may overlap the input (and hence the
-;; output, since they might be the same register), but both of which may
-;; be the same.
-
(define_expand "unaligned_loadhi"
[(set (match_operand:DI 2 "register_operand" "")
- (and:DI (match_operand:DI 1 "register_operand" "")
- (const_int -7)))
+ (mem:DI (and:DI (match_operand:DI 1 "address_operand" "")
+ (const_int -8))))
(set (match_operand:DI 3 "register_operand" "")
- (mem:DI (match_dup 2)))
- (set (match_operand:DI 4 "register_operand" "")
- (and:DI (match_dup 1) (const_int -2)))
- (set (subreg:DI (match_operand:HI 0 "register_operand" "") 0)
- (zero_extract:DI (match_dup 3)
+ (match_dup 1))
+ (set (subreg:DI (match_operand:HI 0 "register_operand" "") 0)
+ (zero_extract:DI (match_dup 2)
(const_int 16)
- (ashift:DI (match_dup 4) (const_int 3))))]
+ (ashift:DI (match_dup 3) (const_int 3))))]
""
"")
-
+
;; Storing an aligned byte or word requires two temporaries. Operand 0 is the
;; aligned SImode MEM. Operand 1 is the register containing the
;; byte or word to store. Operand 2 is the number of bits within the word that
@@ -3297,8 +4460,8 @@
<< INTVAL (operands[2])));
}")
-;; For the unaligned byte case, we use code similar to that in the
-;; Architecture book, but reordered to lower the number of registers
+;; For the unaligned byte and halfword cases, we use code similar to that
+;; in the Architecture book, but reordered to lower the number of registers
;; required. Operand 0 is the address. Operand 1 is the data to store.
;; Operands 2, 3, and 4 are DImode temporaries, where operands 2 and 4 may
;; be the same temporary, if desired. If the address is in a register,
@@ -3323,42 +4486,22 @@
""
"")
-;; This is the code for storing into an unaligned short. It uses the same
-;; trick as loading from an unaligned short. It needs lots of temporaries.
-;; However, during reload, we only have two registers available. So we
-;; repeat code so that only two temporaries are available. During RTL
-;; generation, we can use different pseudos for each temporary and CSE
-;; will remove the redundancies. During reload, we have to settle with
-;; what we get. Luckily, unaligned accesses of this kind produced during
-;; reload are quite rare.
-;;
-;; Operand 0 is the address of the memory location. Operand 1 contains the
-;; data to store. The rest of the operands are all temporaries, with
-;; various overlap possibilities during reload. See reload_outhi for
-;; details of this use.
-
(define_expand "unaligned_storehi"
- [(set (match_operand:DI 2 "register_operand" "")
- (match_operand:DI 0 "address_operand" ""))
- (set (match_operand:DI 3 "register_operand" "")
- (and:DI (match_dup 2) (const_int -7)))
- (set (match_operand:DI 4 "register_operand" "")
- (mem:DI (match_dup 3)))
- (set (match_operand:DI 10 "register_operand" "")
- (and:DI (match_dup 2) (const_int -2)))
- (set (match_operand:DI 5 "register_operand" "")
+ [(set (match_operand:DI 3 "register_operand" "")
+ (mem:DI (and:DI (match_operand:DI 0 "address_operand" "")
+ (const_int -8))))
+ (set (match_operand:DI 2 "register_operand" "")
+ (match_dup 0))
+ (set (match_dup 3)
(and:DI (not:DI (ashift:DI (const_int 65535)
- (ashift:DI (match_dup 10) (const_int 3))))
- (match_dup 4)))
- (set (match_operand:DI 6 "register_operand" "")
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (match_dup 3)))
+ (set (match_operand:DI 4 "register_operand" "")
(ashift:DI (zero_extend:DI (match_operand:HI 1 "register_operand" ""))
- (ashift:DI (match_dup 10) (const_int 3))))
- (set (match_operand:DI 7 "register_operand" "")
- (ior:DI (match_dup 5) (match_dup 6)))
- (set (match_operand:DI 8 "register_operand" "") (match_dup 0))
- (set (match_operand:DI 9 "register_operand" "")
- (and:DI (match_dup 8) (const_int -7)))
- (set (mem:DI (match_dup 9)) (match_dup 7))]
+ (ashift:DI (match_dup 2) (const_int 3))))
+ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3)))
+ (set (mem:DI (and:DI (match_dup 0) (const_int -8)))
+ (match_dup 4))]
""
"")
@@ -3371,7 +4514,25 @@
(match_operand:QI 1 "general_operand" ""))]
""
"
-{ extern rtx get_unaligned_address ();
+{
+ if (TARGET_BWX)
+ {
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], QImode))
+ operands[1] = force_reg (QImode, operands[1]);
+
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! input_operand (operands[1], QImode))
+ {
+ operands[1] = alpha_emit_set_const (operands[0], QImode,
+ INTVAL (operands[1]), 3);
+
+ if (rtx_equal_p (operands[0], operands[1]))
+ DONE;
+ }
+
+ goto def;
+ }
/* If the output is not a register, the input must be. */
if (GET_CODE (operands[0]) == MEM)
@@ -3394,7 +4555,7 @@
{
rtx aligned_mem, bitnum;
rtx scratch = (reload_in_progress
- ? gen_rtx (REG, SImode, REGNO (operands[0]))
+ ? gen_rtx_REG (SImode, REGNO (operands[0]))
: gen_reg_rtx (SImode));
get_aligned_mem (operands[1], &aligned_mem, &bitnum);
@@ -3410,9 +4571,10 @@
rtx temp1 = gen_reg_rtx (DImode);
rtx temp2 = gen_reg_rtx (DImode);
- rtx seq = gen_unaligned_loadqi (operands[0],
- get_unaligned_address (operands[1]),
- temp1, temp2);
+ rtx seq
+ = gen_unaligned_loadqi (operands[0],
+ get_unaligned_address (operands[1], 0),
+ temp1, temp2);
alpha_set_memflags (seq, operands[1]);
emit_insn (seq);
@@ -3446,7 +4608,8 @@
rtx temp1 = gen_reg_rtx (DImode);
rtx temp2 = gen_reg_rtx (DImode);
rtx temp3 = gen_reg_rtx (DImode);
- rtx seq = gen_unaligned_storeqi (get_unaligned_address (operands[0]),
+ rtx seq
+ = gen_unaligned_storeqi (get_unaligned_address (operands[0], 0),
operands[1], temp1, temp2, temp3);
alpha_set_memflags (seq, operands[0]);
@@ -3454,6 +4617,7 @@
}
DONE;
}
+ def:;
}")
(define_expand "movhi"
@@ -3461,7 +4625,25 @@
(match_operand:HI 1 "general_operand" ""))]
""
"
-{ extern rtx get_unaligned_address ();
+{
+ if (TARGET_BWX)
+ {
+ if (GET_CODE (operands[0]) == MEM
+ && ! reg_or_0_operand (operands[1], HImode))
+ operands[1] = force_reg (HImode, operands[1]);
+
+ if (GET_CODE (operands[1]) == CONST_INT
+ && ! input_operand (operands[1], HImode))
+ {
+ operands[1] = alpha_emit_set_const (operands[0], HImode,
+ INTVAL (operands[1]), 3);
+
+ if (rtx_equal_p (operands[0], operands[1]))
+ DONE;
+ }
+
+ goto def;
+ }
/* If the output is not a register, the input must be. */
if (GET_CODE (operands[0]) == MEM)
@@ -3484,7 +4666,7 @@
{
rtx aligned_mem, bitnum;
rtx scratch = (reload_in_progress
- ? gen_rtx (REG, SImode, REGNO (operands[0]))
+ ? gen_rtx_REG (SImode, REGNO (operands[0]))
: gen_reg_rtx (SImode));
get_aligned_mem (operands[1], &aligned_mem, &bitnum);
@@ -3494,16 +4676,16 @@
}
else
{
- rtx addr
- = force_reg (DImode,
- force_operand (get_unaligned_address (operands[1]),
- NULL_RTX));
- rtx scratch1 = gen_reg_rtx (DImode);
- rtx scratch2 = gen_reg_rtx (DImode);
- rtx scratch3 = gen_reg_rtx (DImode);
+ /* Don't pass these as parameters since that makes the generated
+ code depend on parameter evaluation order which will cause
+ bootstrap failures. */
- rtx seq = gen_unaligned_loadhi (operands[0], addr, scratch1,
- scratch2, scratch3);
+ rtx temp1 = gen_reg_rtx (DImode);
+ rtx temp2 = gen_reg_rtx (DImode);
+ rtx seq
+ = gen_unaligned_loadhi (operands[0],
+ get_unaligned_address (operands[1], 0),
+ temp1, temp2);
alpha_set_memflags (seq, operands[1]);
emit_insn (seq);
@@ -3537,17 +4719,9 @@
rtx temp1 = gen_reg_rtx (DImode);
rtx temp2 = gen_reg_rtx (DImode);
rtx temp3 = gen_reg_rtx (DImode);
- rtx temp4 = gen_reg_rtx (DImode);
- rtx temp5 = gen_reg_rtx (DImode);
- rtx temp6 = gen_reg_rtx (DImode);
- rtx temp7 = gen_reg_rtx (DImode);
- rtx temp8 = gen_reg_rtx (DImode);
- rtx temp9 = gen_reg_rtx (DImode);
-
- rtx seq = gen_unaligned_storehi (get_unaligned_address (operands[0]),
- operands[1], temp1, temp2,temp3,
- temp4, temp5, temp6,temp7,
- temp8, temp9);
+ rtx seq
+ = gen_unaligned_storehi (get_unaligned_address (operands[0], 0),
+ operands[1], temp1, temp2, temp3);
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
@@ -3555,6 +4729,7 @@
DONE;
}
+ def:;
}")
;; Here are the versions for reload. Note that in the unaligned cases
@@ -3565,18 +4740,20 @@
[(parallel [(match_operand:QI 0 "register_operand" "=r")
(match_operand:QI 1 "unaligned_memory_operand" "m")
(match_operand:TI 2 "register_operand" "=&r")])]
- ""
+ "! TARGET_BWX"
"
-{ extern rtx get_unaligned_address ();
- rtx addr = get_unaligned_address (operands[1]);
+{
+ rtx addr = get_unaligned_address (operands[1], 0);
+
/* It is possible that one of the registers we got for operands[2]
might coincide with that of operands[0] (which is why we made
it TImode). Pick the other one to use as our scratch. */
- rtx scratch = gen_rtx (REG, DImode,
- REGNO (operands[0]) == REGNO (operands[2])
- ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+ rtx scratch = gen_rtx_REG (DImode,
+ REGNO (operands[0]) == REGNO (operands[2])
+ ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+
rtx seq = gen_unaligned_loadqi (operands[0], addr, scratch,
- gen_rtx (REG, DImode, REGNO (operands[0])));
+ gen_rtx_REG (DImode, REGNO (operands[0])));
alpha_set_memflags (seq, operands[1]);
emit_insn (seq);
@@ -3587,21 +4764,21 @@
[(parallel [(match_operand:HI 0 "register_operand" "=r")
(match_operand:HI 1 "unaligned_memory_operand" "m")
(match_operand:TI 2 "register_operand" "=&r")])]
- ""
+ "! TARGET_BWX"
"
-{ extern rtx get_unaligned_address ();
- rtx addr = get_unaligned_address (operands[1]);
- rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
- rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
- rtx seq;
+{
+ rtx addr = get_unaligned_address (operands[1], 0);
+
+ /* It is possible that one of the registers we got for operands[2]
+ might coincide with that of operands[0] (which is why we made
+ it TImode). Pick the other one to use as our scratch. */
+ rtx scratch = gen_rtx_REG (DImode,
+ REGNO (operands[0]) == REGNO (operands[2])
+ ? REGNO (operands[2]) + 1 : REGNO (operands[2]));
+
+ rtx seq = gen_unaligned_loadhi (operands[0], addr, scratch,
+ gen_rtx_REG (DImode, REGNO (operands[0])));
- if (GET_CODE (addr) != REG)
- {
- emit_insn (gen_rtx (SET, VOIDmode, scratch2, addr));
- addr = scratch2;
- }
-
- seq = gen_unaligned_loadhi (operands[0], addr, scratch1, scratch1, scratch2);
alpha_set_memflags (seq, operands[1]);
emit_insn (seq);
DONE;
@@ -3611,10 +4788,9 @@
[(parallel [(match_operand:QI 0 "any_memory_operand" "=m")
(match_operand:QI 1 "register_operand" "r")
(match_operand:TI 2 "register_operand" "=&r")])]
- ""
+ "! TARGET_BWX"
"
-{ extern rtx get_unaligned_address ();
-
+{
if (aligned_memory_operand (operands[0], QImode))
{
rtx aligned_mem, bitnum;
@@ -3622,15 +4798,15 @@
get_aligned_mem (operands[0], &aligned_mem, &bitnum);
emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
- gen_rtx (REG, SImode, REGNO (operands[2])),
- gen_rtx (REG, SImode,
- REGNO (operands[2]) + 1)));
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode,
+ REGNO (operands[2]) + 1)));
}
else
{
- rtx addr = get_unaligned_address (operands[0]);
- rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
- rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
+ rtx addr = get_unaligned_address (operands[0], 0);
+ rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
rtx scratch3 = scratch1;
rtx seq;
@@ -3650,10 +4826,9 @@
[(parallel [(match_operand:HI 0 "any_memory_operand" "=m")
(match_operand:HI 1 "register_operand" "r")
(match_operand:TI 2 "register_operand" "=&r")])]
- ""
+ "! TARGET_BWX"
"
-{ extern rtx get_unaligned_address ();
-
+{
if (aligned_memory_operand (operands[0], HImode))
{
rtx aligned_mem, bitnum;
@@ -3661,22 +4836,23 @@
get_aligned_mem (operands[0], &aligned_mem, &bitnum);
emit_insn (gen_aligned_store (aligned_mem, operands[1], bitnum,
- gen_rtx (REG, SImode, REGNO (operands[2])),
- gen_rtx (REG, SImode,
- REGNO (operands[2]) + 1)));
+ gen_rtx_REG (SImode, REGNO (operands[2])),
+ gen_rtx_REG (SImode,
+ REGNO (operands[2]) + 1)));
}
else
{
- rtx addr = get_unaligned_address (operands[0]);
- rtx scratch1 = gen_rtx (REG, DImode, REGNO (operands[2]));
- rtx scratch2 = gen_rtx (REG, DImode, REGNO (operands[2]) + 1);
- rtx scratch_a = GET_CODE (addr) == REG ? addr : scratch1;
+ rtx addr = get_unaligned_address (operands[0], 0);
+ rtx scratch1 = gen_rtx_REG (DImode, REGNO (operands[2]));
+ rtx scratch2 = gen_rtx_REG (DImode, REGNO (operands[2]) + 1);
+ rtx scratch3 = scratch1;
rtx seq;
- seq = gen_unaligned_storehi (addr, operands[1], scratch_a,
- scratch2, scratch2, scratch2,
- scratch1, scratch2, scratch_a,
- scratch1, scratch_a);
+ if (GET_CODE (addr) == REG)
+ scratch1 = addr;
+
+ seq = gen_unaligned_storehi (addr, operands[1], scratch1,
+ scratch2, scratch3);
alpha_set_memflags (seq, operands[0]);
emit_insn (seq);
}
@@ -3684,14 +4860,133 @@
DONE;
}")
+;; Bit field extract patterns which use ext[wlq][lh]
+
+(define_expand "extv"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extract:DI (match_operand:QI 1 "memory_operand" "")
+ (match_operand:DI 2 "immediate_operand" "")
+ (match_operand:DI 3 "immediate_operand" "")))]
+ ""
+ "
+{
+ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[3]) % 8 != 0
+ || (INTVAL (operands[2]) != 16
+ && INTVAL (operands[2]) != 32
+ && INTVAL (operands[2]) != 64))
+ FAIL;
+
+ /* From mips.md: extract_bit_field doesn't verify that our source
+ matches the predicate, so we force it to be a MEM here. */
+ if (GET_CODE (operands[1]) != MEM)
+ FAIL;
+
+ alpha_expand_unaligned_load (operands[0], operands[1],
+ INTVAL (operands[2]) / 8,
+ INTVAL (operands[3]) / 8, 1);
+ DONE;
+}")
+
+(define_expand "extzv"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extract:DI (match_operand:DI 1 "general_operand" "")
+ (match_operand:DI 2 "immediate_operand" "")
+ (match_operand:DI 3 "immediate_operand" "")))]
+ ""
+ "
+{
+ /* We can do 8, 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[3]) % 8 != 0
+ || (INTVAL (operands[2]) != 8
+ && INTVAL (operands[2]) != 16
+ && INTVAL (operands[2]) != 32
+ && INTVAL (operands[2]) != 64))
+ FAIL;
+
+ if (GET_CODE (operands[1]) == MEM)
+ {
+ /* Fail 8 bit fields, falling back on a simple byte load. */
+ if (INTVAL (operands[2]) == 8)
+ FAIL;
+
+ alpha_expand_unaligned_load (operands[0], operands[1],
+ INTVAL (operands[2]) / 8,
+ INTVAL (operands[3]) / 8, 0);
+ DONE;
+ }
+}")
+
+(define_expand "insv"
+ [(set (zero_extract:DI (match_operand:QI 0 "memory_operand" "")
+ (match_operand:DI 1 "immediate_operand" "")
+ (match_operand:DI 2 "immediate_operand" ""))
+ (match_operand:DI 3 "register_operand" ""))]
+ ""
+ "
+{
+ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */
+ if (INTVAL (operands[2]) % 8 != 0
+ || (INTVAL (operands[1]) != 16
+ && INTVAL (operands[1]) != 32
+ && INTVAL (operands[1]) != 64))
+ FAIL;
+
+ /* From mips.md: store_bit_field doesn't verify that our source
+ matches the predicate, so we force it to be a MEM here. */
+ if (GET_CODE (operands[0]) != MEM)
+ FAIL;
+
+ alpha_expand_unaligned_store (operands[0], operands[3],
+ INTVAL (operands[1]) / 8,
+ INTVAL (operands[2]) / 8);
+ DONE;
+}")
+
+
+
+;; Block move/clear, see alpha.c for more details.
+;; Argument 0 is the destination
+;; Argument 1 is the source
+;; Argument 2 is the length
+;; Argument 3 is the alignment
+
+(define_expand "movstrqi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ (match_operand:BLK 1 "general_operand" ""))
+ (use (match_operand:DI 2 "immediate_operand" ""))
+ (use (match_operand:DI 3 "immediate_operand" ""))])]
+ ""
+ "
+{
+ if (alpha_expand_block_move (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
+(define_expand "clrstrqi"
+ [(parallel [(set (match_operand:BLK 0 "general_operand" "")
+ (const_int 0))
+ (use (match_operand:DI 1 "immediate_operand" ""))
+ (use (match_operand:DI 2 "immediate_operand" ""))])]
+ ""
+ "
+{
+ if (alpha_expand_block_clear (operands))
+ DONE;
+ else
+ FAIL;
+}")
+
;; Subroutine of stack space allocation. Perform a stack probe.
(define_expand "probe_stack"
[(set (match_dup 1) (match_operand:DI 0 "const_int_operand" ""))]
""
"
{
- operands[1] = gen_rtx (MEM, DImode, plus_constant (stack_pointer_rtx,
- INTVAL (operands[0])));
+ operands[1] = gen_rtx_MEM (DImode, plus_constant (stack_pointer_rtx,
+ INTVAL (operands[0])));
MEM_VOLATILE_P (operands[1]) = 1;
operands[0] = const0_rtx;
@@ -3706,14 +5001,16 @@
(define_expand "allocate_stack"
[(set (reg:DI 30)
(plus:DI (reg:DI 30)
- (match_operand:DI 0 "reg_or_cint_operand" "")))]
+ (match_operand:DI 1 "reg_or_cint_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 2))]
""
"
{
- if (GET_CODE (operands[0]) == CONST_INT
- && INTVAL (operands[0]) < 32768)
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < 32768)
{
- if (INTVAL (operands[0]) >= 4096)
+ if (INTVAL (operands[1]) >= 4096)
{
/* We do this the same way as in the prologue and generate explicit
probes. Then we update the stack by the constant. */
@@ -3721,14 +5018,15 @@
int probed = 4096;
emit_insn (gen_probe_stack (GEN_INT (- probed)));
- while (probed + 8192 < INTVAL (operands[0]))
+ while (probed + 8192 < INTVAL (operands[1]))
emit_insn (gen_probe_stack (GEN_INT (- (probed += 8192))));
- if (probed + 4096 < INTVAL (operands[0]))
- emit_insn (gen_probe_stack (GEN_INT (- INTVAL(operands[0]))));
+ if (probed + 4096 < INTVAL (operands[1]))
+ emit_insn (gen_probe_stack (GEN_INT (- INTVAL(operands[1]))));
}
- operands[0] = GEN_INT (- INTVAL (operands[0]));
+ operands[1] = GEN_INT (- INTVAL (operands[1]));
+ operands[2] = virtual_stack_dynamic_rtx;
}
else
{
@@ -3739,10 +5037,10 @@
rtx memref;
emit_insn (gen_subdi3 (want, stack_pointer_rtx,
- force_reg (Pmode, operands[0])));
+ force_reg (Pmode, operands[1])));
emit_insn (gen_adddi3 (tmp, stack_pointer_rtx, GEN_INT (-4096)));
- if (GET_CODE (operands[0]) != CONST_INT)
+ if (GET_CODE (operands[1]) != CONST_INT)
{
out_label = gen_label_rtx ();
emit_insn (gen_cmpdi (want, tmp));
@@ -3750,13 +5048,16 @@
}
emit_label (loop_label);
- memref = gen_rtx (MEM, DImode, tmp);
+ memref = gen_rtx_MEM (DImode, tmp);
MEM_VOLATILE_P (memref) = 1;
emit_move_insn (memref, const0_rtx);
emit_insn (gen_adddi3 (tmp, tmp, GEN_INT(-8192)));
emit_insn (gen_cmpdi (tmp, want));
emit_jump_insn (gen_bgtu (loop_label));
- memref = gen_rtx (MEM, DImode, want);
+ if (obey_regdecls)
+ emit_insn (gen_rtx_USE (VOIDmode, tmp));
+
+ memref = gen_rtx_MEM (DImode, want);
MEM_VOLATILE_P (memref) = 1;
emit_move_insn (memref, const0_rtx);
@@ -3764,7 +5065,160 @@
emit_label (out_label);
emit_move_insn (stack_pointer_rtx, want);
-
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
DONE;
}
}")
+
+;; This is used by alpha_expand_prologue to do the same thing as above,
+;; except we cannot at that time generate new basic blocks, so we hide
+;; the loop in this one insn.
+
+(define_insn "prologue_stack_probe_loop"
+ [(unspec_volatile [(match_operand 0 "register_operand" "r")
+ (match_operand 1 "register_operand" "r")] 5)]
+ ""
+ "*
+{
+ static int label_no;
+ int count_regno = REGNO (operands[0]);
+ int ptr_regno = REGNO (operands[1]);
+ char label[64];
+
+ /* Ho hum, output the hard way to get the label at the beginning of
+ the line. Wish there were a magic char you could get
+ asm_output_printf to do that. Then we could use %= as well and
+ get rid of the label_no bits here too. */
+
+ ASM_GENERATE_INTERNAL_LABEL (label, \"LSC\", label_no);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"LSC\", label_no++);
+
+ fprintf (asm_out_file, \"\\tstq $31,-8192($%d)\\n\", ptr_regno);
+ fprintf (asm_out_file, \"\\tsubq $%d,1,$%d\\n\", count_regno, count_regno);
+ fprintf (asm_out_file, \"\\tlda $%d,-8192($%d)\\n\", ptr_regno, ptr_regno);
+ fprintf (asm_out_file, \"\\tbne $%d,\", count_regno);
+ assemble_name (asm_out_file, label);
+ putc ('\\n', asm_out_file);
+
+ return \"\";
+}"
+ [(set_attr "length" "16")])
+
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+ "alpha_expand_prologue (); DONE;")
+
+(define_insn "init_fp"
+ [(set (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (clobber (mem:BLK (match_operand:DI 2 "register_operand" "r")))]
+ ""
+ "bis %1,%1,%0")
+
+(define_expand "epilogue"
+ [(clobber (const_int 0))]
+ ""
+ "alpha_expand_epilogue (); DONE;")
+
+(define_expand "builtin_longjmp"
+ [(unspec_volatile [(match_operand 0 "register_operand" "r")] 3)]
+ "! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT"
+ "
+{
+ /* The elements of the buffer are, in order: */
+ rtx fp = gen_rtx_MEM (Pmode, operands[0]);
+ rtx lab = gen_rtx_MEM (Pmode, plus_constant (operands[0], 8));
+ rtx stack = gen_rtx_MEM (Pmode, plus_constant (operands[0], 16));
+ rtx pv = gen_rtx_REG (Pmode, 27);
+
+ /* This bit is the same as expand_builtin_longjmp. */
+ emit_move_insn (hard_frame_pointer_rtx, fp);
+ emit_move_insn (pv, lab);
+ emit_stack_restore (SAVE_NONLOCAL, stack, NULL_RTX);
+ emit_insn (gen_rtx_USE (VOIDmode, hard_frame_pointer_rtx));
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+
+ /* Load the label we are jumping through into $27 so that we know
+ where to look for it when we get back to setjmp's function for
+ restoring the gp. */
+ emit_indirect_jump (pv);
+}")
+
+(define_insn "builtin_setjmp_receiver"
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ "! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT && TARGET_AS_CAN_SUBTRACT_LABELS"
+ "\\n$LSJ%=:\;ldgp $29,$LSJ%=-%l0($27)"
+ [(set_attr "length" "8")])
+
+(define_insn ""
+ [(unspec_volatile [(match_operand 0 "" "")] 2)]
+ "! TARGET_OPEN_VMS && ! TARGET_WINDOWS_NT"
+ "br $27,$LSJ%=\\n$LSJ%=:\;ldgp $29,0($27)"
+ [(set_attr "length" "12")])
+
+(define_expand "nonlocal_goto_receiver"
+ [(unspec_volatile [(const_int 0)] 1)
+ (set (reg:DI 27) (mem:DI (reg:DI 29)))
+ (unspec_volatile [(const_int 0)] 1)
+ (use (reg:DI 27))]
+ "TARGET_OPEN_VMS"
+ "")
+
+(define_insn "arg_home"
+ [(unspec [(const_int 0)] 0)
+ (use (reg:DI 1))
+ (use (reg:DI 25))
+ (use (reg:DI 16))
+ (use (reg:DI 17))
+ (use (reg:DI 18))
+ (use (reg:DI 19))
+ (use (reg:DI 20))
+ (use (reg:DI 21))
+ (use (reg:DI 48))
+ (use (reg:DI 49))
+ (use (reg:DI 50))
+ (use (reg:DI 51))
+ (use (reg:DI 52))
+ (use (reg:DI 53))
+ (clobber (mem:BLK (const_int 0)))
+ (clobber (reg:DI 24))
+ (clobber (reg:DI 25))
+ (clobber (reg:DI 0))]
+ "TARGET_OPEN_VMS"
+ "lda $0,OTS$HOME_ARGS\;ldq $0,8($0)\;jsr $0,OTS$HOME_ARGS"
+ [(set_attr "length" "16")])
+
+;; Close the trap shadow of preceding instructions. This is generated
+;; by alpha_reorg.
+
+(define_insn "trapb"
+ [(unspec_volatile [(const_int 0)] 4)]
+ ""
+ "trapb"
+ [(set_attr "type" "misc")])
+
+;; Peepholes go at the end.
+
+;; Optimize sign-extension of SImode loads. This shows up in the wake of
+;; reload when converting fp->int.
+;;
+;; ??? What to do now that we actually care about the packing and
+;; alignment of instructions? Perhaps reload can be enlightened, or
+;; the peephole pass moved up after reload but before sched2?
+;
+;(define_peephole
+; [(set (match_operand:SI 0 "register_operand" "=r")
+; (match_operand:SI 1 "memory_operand" "m"))
+; (set (match_operand:DI 2 "register_operand" "=r")
+; (sign_extend:DI (match_dup 0)))]
+; "dead_or_set_p (insn, operands[0])"
+; "ldl %2,%1")
+;
+;(define_peephole
+; [(set (match_operand:SI 0 "register_operand" "=r")
+; (match_operand:SI 1 "hard_fp_register_operand" "f"))
+; (set (match_operand:DI 2 "register_operand" "=r")
+; (sign_extend:DI (match_dup 0)))]
+; "TARGET_CIX && dead_or_set_p (insn, operands[0])"
+; "ftois %1,%2")
diff --git a/contrib/gcc/config/alpha/crtbegin.asm b/contrib/gcc/config/alpha/crtbegin.asm
new file mode 100644
index 0000000..c28440d
--- /dev/null
+++ b/contrib/gcc/config/alpha/crtbegin.asm
@@ -0,0 +1,111 @@
+ # Copyright (C) 1996, 1998 Free Software Foundation, Inc.
+ # Contributed by Richard Henderson (rth@tamu.edu)
+ #
+ # This file is free software; you can redistribute it and/or modify it
+ # under the terms of the GNU General Public License as published by the
+ # Free Software Foundation; either version 2, or (at your option) any
+ # later version.
+ #
+ # In addition to the permissions in the GNU General Public License, the
+ # Free Software Foundation gives you unlimited permission to link the
+ # compiled version of this file with other programs, and to distribute
+ # those programs without any restriction coming from the use of this
+ # file. (The General Public License restrictions do apply in other
+ # respects; for example, they cover modification of the file, and
+ # distribution when not linked into another program.)
+ #
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ # General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program; see the file COPYING. If not, write to
+ # the Free Software Foundation, 59 Temple Place - Suite 330,
+ # Boston, MA 02111-1307, USA.
+ #
+ # As a special exception, if you link this library with files
+ # compiled with GCC to produce an executable, this does not cause
+ # the resulting executable to be covered by the GNU General Public License.
+ # This exception does not however invalidate any other reasons why
+ # the executable file might be covered by the GNU General Public License.
+
+ #
+ # Heads of the constructor/destructor lists.
+ #
+
+ # The __*TOR_LIST__ symbols are not global because when this file is used
+ # in a shared library, we do not want the symbol to fall over to the
+ # application's lists.
+
+.section .ctors,"aw"
+
+ .align 3
+__CTOR_LIST__:
+ .quad -1
+
+.section .dtors,"aw"
+
+ .align 3
+__DTOR_LIST__:
+ .quad -1
+
+
+ #
+ # Fragment of the ELF _fini routine that invokes our dtor cleanup.
+ #
+
+.section .fini,"ax"
+
+ # Since the bits of the _fini function are spread across many
+ # object files, each potentially with its own GP, we must
+ # assume we need to load ours. Further, our .fini section
+ # can easily be more than 4MB away from our .text bits so we
+ # can't use bsr.
+
+ br $29,1f
+1: ldgp $29,0($29)
+ jsr $26,__do_global_dtors_aux
+
+ # Must match the alignment we got from crti.o else we get
+ # zero-filled holes in our _fini function and then SIGILL.
+ .align 3
+
+ #
+ # Invoke our destructors in order.
+ #
+
+.data
+
+ # Support recursive calls to exit.
+9: .quad __DTOR_LIST__
+
+.text
+
+ .align 3
+ .ent __do_global_dtors_aux
+
+__do_global_dtors_aux:
+ ldgp $29,0($27)
+ lda $30,-16($30)
+ .frame $30,16,$26,0
+ stq $9,8($30)
+ stq $26,0($30)
+ .mask 0x4000200,-16
+ .prologue 1
+
+ lda $9,9b
+ br 1f
+0: stq $1,0($9)
+ jsr $26,($27)
+1: ldq $1,0($9)
+ ldq $27,8($1)
+ addq $1,8,$1
+ bne $27,0b
+
+ ldq $26,0($30)
+ ldq $9,8($30)
+ lda $30,16($30)
+ ret
+
+ .end __do_global_dtors_aux
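
A C model of the destructor walk above may make the register use easier to
follow (the names are illustrative; this is not the real crtstuff code).
The pointer kept at label 9 is advanced and stored back before each call,
which is what keeps the walk safe if a destructor itself calls exit():

    typedef void (*func_ptr) (void);
    extern func_ptr __DTOR_LIST__[];     /* [0] == (func_ptr) -1, 0-terminated */

    static func_ptr *dtor_ptr = __DTOR_LIST__;   /* the word at label 9 */

    static void
    do_global_dtors_aux_model (void)
    {
      func_ptr fn;
      while ((fn = dtor_ptr[1]) != 0)    /* ldq $27,8($1); stop at __DTOR_END__ */
        {
          dtor_ptr++;                    /* addq $1,8,$1; stq $1,0($9) */
          fn ();                         /* jsr $26,($27) */
        }
    }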
diff --git a/contrib/gcc/config/alpha/crtend.asm b/contrib/gcc/config/alpha/crtend.asm
new file mode 100644
index 0000000..36f11b9
--- /dev/null
+++ b/contrib/gcc/config/alpha/crtend.asm
@@ -0,0 +1,105 @@
+ # Copyright (C) 1996 Free Software Foundation, Inc.
+ # Contributed by Richard Henderson (rth@tamu.edu)
+ #
+ # This file is free software; you can redistribute it and/or modify it
+ # under the terms of the GNU General Public License as published by the
+ # Free Software Foundation; either version 2, or (at your option) any
+ # later version.
+ #
+ # In addition to the permissions in the GNU General Public License, the
+ # Free Software Foundation gives you unlimited permission to link the
+ # compiled version of this file with other programs, and to distribute
+ # those programs without any restriction coming from the use of this
+ # file. (The General Public License restrictions do apply in other
+ # respects; for example, they cover modification of the file, and
+ # distribution when not linked into another program.)
+ #
+ # This file is distributed in the hope that it will be useful, but
+ # WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ # General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+ # along with this program; see the file COPYING. If not, write to
+ # the Free Software Foundation, 59 Temple Place - Suite 330,
+ # Boston, MA 02111-1307, USA.
+ #
+ # As a special exception, if you link this library with files
+ # compiled with GCC to produce an executable, this does not cause
+ # the resulting executable to be covered by the GNU General Public License.
+ # This exception does not however invalidate any other reasons why
+ # the executable file might be covered by the GNU General Public License.
+
+ #
+ # Tails of the constructor/destructor lists.
+ #
+
+ # The __*TOR_END__ symbols are not global because when this file is used
+ # in a shared library, we do not want the symbol to fall over to the
+ # application's lists.
+
+.section .ctors,"aw"
+
+ .align 3
+__CTOR_END__:
+ .quad 0
+
+.section .dtors,"aw"
+
+ .align 3
+__DTOR_END__:
+ .quad 0
+
+
+ #
+ # Fragment of the ELF _init routine that invokes our ctor startup
+ #
+
+.section .init,"ax"
+
+ # Since the bits of the _init function are spread across many
+ # object files, each potentially with its own GP, we must
+ # assume we need to load ours. Further, our .init section
+ # can easily be more than 4MB away from our .text bits so we
+ # can't use bsr.
+
+ br $29,1f
+1: ldgp $29,0($29)
+ jsr $26,__do_global_ctors_aux
+
+ # Must match the alignment we got from crti.o else we get
+ # zero-filled holes in our _init function and then SIGILL.
+ .align 3
+
+ #
+ # Invoke our constructors in order.
+ #
+
+.text
+
+ .align 3
+ .ent __do_global_ctors_aux
+
+__do_global_ctors_aux:
+ ldgp $29,0($27)
+ lda $30,-16($30)
+ .frame $30,16,$26,0
+ stq $9,8($30)
+ stq $26,0($30)
+ .mask 0x4000200,-16
+ .prologue 1
+
+ lda $9,__CTOR_END__
+ br 1f
+0: jsr $26,($27)
+1: ldq $27,-8($9)
+ subq $9,8,$9
+ not $27,$0
+ bne $0,0b
+
+ ldq $26,0($30)
+ ldq $9,8($30)
+ lda $30,16($30)
+ ret
+
+ .end __do_global_ctors_aux
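
The constructor walk runs in the opposite direction; a C model (again
illustrative, not the real crtstuff code) walks down from __CTOR_END__ and
stops at the all-ones sentinel that crtbegin.o placed at __CTOR_LIST__,
which is what the not/bne pair tests:

    typedef void (*func_ptr) (void);
    extern func_ptr __CTOR_END__[];

    static void
    do_global_ctors_aux_model (void)
    {
      func_ptr *p = __CTOR_END__;
      while (*--p != (func_ptr) -1)      /* ldq $27,-8($9); not/bne sentinel test */
        (*p) ();                         /* jsr $26,($27) */
    }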
diff --git a/contrib/gcc/config/alpha/elf.h b/contrib/gcc/config/alpha/elf.h
index 82f0410..4f4703c 100644
--- a/contrib/gcc/config/alpha/elf.h
+++ b/contrib/gcc/config/alpha/elf.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for DEC Alpha w/ELF.
- Copyright (C) 1996 Free Software Foundation, Inc.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
Contributed by Richard Henderson (rth@tamu.edu).
This file is part of GNU CC.
@@ -19,53 +19,49 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* This is used on Alpha platforms that use the ELF format.
-Currently only Linux uses this. */
-
-#if 0
-#include "alpha/linux.h"
-#endif
-
-#undef TARGET_VERSION
-#define TARGET_VERSION fprintf (stderr, " (Alpha Linux/ELF)");
-
#undef OBJECT_FORMAT_COFF
#undef EXTENDED_COFF
#define OBJECT_FORMAT_ELF
-#define SDB_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
#undef ASM_FINAL_SPEC
-#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "\
--D__alpha -D__alpha__ -D__linux__ -D__linux -D_LONGLONG -Dlinux -Dunix \
--Asystem(linux) -Acpu(alpha) -Amachine(alpha) -D__ELF__"
+#undef CC1_SPEC
+#define CC1_SPEC "%{G*}"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{G*} %{relax:-relax}"
#undef LINK_SPEC
-#define LINK_SPEC "-m elf64alpha -G 8 %{O*:-O3} %{!O*:-O1} \
+#define LINK_SPEC "-m elf64alpha %{G*} %{relax:-relax} \
+ %{O*:-O3} %{!O*:-O1} \
%{shared:-shared} \
%{!shared: \
%{!static: \
%{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib/ld.so.1}} \
+ %{!dynamic-linker:-dynamic-linker %(elf_dynamic_linker)}} \
%{static:-static}}"
/* Output at beginning of assembler file. */
-
#undef ASM_FILE_START
#define ASM_FILE_START(FILE) \
{ \
alpha_write_verstamp (FILE); \
output_file_directive (FILE, main_input_filename); \
- fprintf (FILE, "\t.version\t\"01.01\"\n"); \
fprintf (FILE, "\t.set noat\n"); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ if (TARGET_BWX | TARGET_MAX | TARGET_CIX) \
+ { \
+ fprintf (FILE, "\t.arch %s\n", \
+ (alpha_cpu == PROCESSOR_EV6 ? "ev6" \
+ : TARGET_MAX ? "pca56" : "ev56")); \
+ } \
}
-#define ASM_OUTPUT_SOURCE_LINE(STREAM, LINE) \
- alpha_output_lineno (STREAM, LINE)
-extern void alpha_output_lineno ();
-
extern void output_file_directive ();
/* Attach a special .ident directive to the end of the file to identify
@@ -77,8 +73,8 @@ extern void output_file_directive ();
#ifdef IDENTIFY_WITH_IDENT
#define ASM_IDENTIFY_GCC(FILE) /* nothing */
-#define ASM_IDENTIFY_LANGUAGE(FILE) \
- fprintf(FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
+#define ASM_IDENTIFY_LANGUAGE(FILE) \
+ fprintf(FILE, "\t%s \"GCC (%s) %s\"\n", IDENT_ASM_OP, \
lang_identify(), version_string)
#else
#define ASM_FILE_END(FILE) \
@@ -89,11 +85,9 @@ do { \
#endif
/* Allow #sccs in preprocessor. */
-
#define SCCS_DIRECTIVE
/* Output #ident as a .ident. */
-
#define ASM_OUTPUT_IDENT(FILE, NAME) \
fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
@@ -155,22 +149,46 @@ do { \
the linker seems to want the alignment of data objects
to depend on their types. We do exactly that here. */
-#define LOCAL_ASM_OP ".local"
-
#undef ASM_OUTPUT_ALIGNED_LOCAL
#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
do { \
- fprintf ((FILE), "\t%s\t", LOCAL_ASM_OP); \
- assemble_name ((FILE), (NAME)); \
- fprintf ((FILE), "\n"); \
- ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+ if ((SIZE) <= g_switch_value) \
+ sbss_section(); \
+ else \
+ bss_section(); \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ if (!flag_inhibit_size_directive) \
+ { \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", (SIZE)); \
+ } \
+ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ASM_OUTPUT_SKIP((FILE), (SIZE)); \
} while (0)
/* This is the pseudo-op used to generate a 64-bit word of data with a
- specific value in some section. */
+ specific value in some section. */
#define INT_ASM_OP ".quad"
+/* Biggest alignment supported by the object file format of this
+ machine. Use this macro to limit the alignment which can be
+ specified using the `__attribute__ ((aligned (N)))' construct. If
+ not defined, the default value is `BIGGEST_ALIGNMENT'.
+
+ This value is really 2^63. Since gcc figures the alignment in bits,
+ we could only potentially get to 2^60 on suitable hosts. Due to other
+ considerations in varasm, we must restrict this to what fits in an int. */
+
+#define MAX_OFILE_ALIGNMENT \
+ (1 << (HOST_BITS_PER_INT < 64 ? HOST_BITS_PER_INT - 2 : 62))
+
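+
A worked instance of that limit, assuming a host with 32-bit int (the value
of HOST_BITS_PER_INT here is only an assumption for the example):

    #include <stdio.h>

    int
    main (void)
    {
      const int host_bits_per_int = 32;   /* assumed for this example */
      long long max_align_bits =
        1LL << (host_bits_per_int < 64 ? host_bits_per_int - 2 : 62);

      /* Prints: 1073741824 bits = 134217728 bytes (128 MB).  */
      printf ("%lld bits = %lld bytes\n", max_align_bits, max_align_bits / 8);
      return 0;
    }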
/* This is the pseudo-op used to generate a contiguous sequence of byte
values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
@@ -208,6 +226,11 @@ do { \
#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
+/* Handle the small data sections. */
+#define BSS_SECTION_ASM_OP ".section\t.bss"
+#define SBSS_SECTION_ASM_OP ".section\t.sbss,\"aw\""
+#define SDATA_SECTION_ASM_OP ".section\t.sdata,\"aw\""
+
/* On svr4, we *do* have support for the .init and .fini sections, and we
can put stuff in there to be executed before and after `main'. We let
crtstuff.c and other files know this by defining the following symbols.
@@ -217,17 +240,13 @@ do { \
#define INIT_SECTION_ASM_OP ".section\t.init"
#define FINI_SECTION_ASM_OP ".section\t.fini"
-/* Support non-common, uninitialized data in the .bss section. */
-
-#define BSS_SECTION_ASM_OP ".section\t.bss"
-
/* A default list of other sections which we might be "in" at any given
time. For targets that use additional sections (e.g. .tdesc) you
should override this definition in the target-specific file which
includes this file. */
#undef EXTRA_SECTIONS
-#define EXTRA_SECTIONS in_const, in_ctors, in_dtors, in_bss
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors, in_sbss, in_sdata
/* A default list of extra section function definitions. For targets
that use additional sections (e.g. .tdesc) you should override this
@@ -236,9 +255,10 @@ do { \
#undef EXTRA_SECTION_FUNCTIONS
#define EXTRA_SECTION_FUNCTIONS \
CONST_SECTION_FUNCTION \
- CTORS_SECTION_FUNCTION \
- DTORS_SECTION_FUNCTION \
- BSS_SECTION_FUNCTION
+ SECTION_FUNCTION_TEMPLATE(ctors_section, in_ctors, CTORS_SECTION_ASM_OP) \
+ SECTION_FUNCTION_TEMPLATE(dtors_section, in_dtors, DTORS_SECTION_ASM_OP) \
+ SECTION_FUNCTION_TEMPLATE(sbss_section, in_sbss, SBSS_SECTION_ASM_OP) \
+ SECTION_FUNCTION_TEMPLATE(sdata_section, in_sdata, SDATA_SECTION_ASM_OP)
#undef READONLY_DATA_SECTION
#define READONLY_DATA_SECTION() const_section ()
@@ -258,36 +278,13 @@ const_section () \
} \
}
-#define CTORS_SECTION_FUNCTION \
-void \
-ctors_section () \
+#define SECTION_FUNCTION_TEMPLATE(FN, ENUM, OP) \
+void FN () \
{ \
- if (in_section != in_ctors) \
+ if (in_section != ENUM) \
{ \
- fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
- in_section = in_ctors; \
- } \
-}
-
-#define DTORS_SECTION_FUNCTION \
-void \
-dtors_section () \
-{ \
- if (in_section != in_dtors) \
- { \
- fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
- in_section = in_dtors; \
- } \
-}
-
-#define BSS_SECTION_FUNCTION \
-void \
-bss_section () \
-{ \
- if (in_section != in_bss) \
- { \
- fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); \
- in_section = in_bss; \
+ fprintf (asm_out_file, "%s\n", OP); \
+ in_section = ENUM; \
} \
}
@@ -297,10 +294,10 @@ bss_section () \
We make the section read-only and executable for a function decl,
read-only for a const data decl, and writable for a non-const data decl. */
-#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME) \
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
(DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
- (DECL) && TREE_READONLY (DECL) ? "a" : "aw")
+ (DECL) && DECL_READONLY_SECTION (DECL, RELOC) ? "a" : "aw")
/* A C statement (sans semicolon) to output an element in the table of
@@ -344,11 +341,10 @@ bss_section () \
|| !DECL_INITIAL (DECL) \
|| (DECL_INITIAL (DECL) != error_mark_node \
&& !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
- { \
- if (DECL_COMMON (DECL) \
- && !DECL_INITIAL (DECL)) \
- /* || DECL_INITIAL (DECL) == error_mark_node)) */ \
- bss_section(); \
+ { \
+ int size = int_size_in_bytes (TREE_TYPE (DECL)); \
+ if (size >= 0 && size <= g_switch_value) \
+ sdata_section (); \
else \
data_section (); \
} \
@@ -486,39 +482,39 @@ do { \
escape sequence like \377 would count as four bytes.
If your target assembler doesn't support the .string directive, you
- should define this to zero.
-*/
+ should define this to zero. */
#define STRING_LIMIT ((unsigned) 256)
-
#define STRING_ASM_OP ".string"
-/*
- * We always use gas here, so we don't worry about ECOFF assembler problems.
- */
+/* GAS is the only Alpha/ELF assembler. */
#undef TARGET_GAS
#define TARGET_GAS (1)
-#undef PREFERRED_DEBUGGING_TYPE
-#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+/* Provide a STARTFILE_SPEC appropriate for ELF. Here we add the
+ (even more) magical crtbegin.o file which provides part of the
+ support for getting C++ file-scope static object constructed before
+ entering `main'.
-/* Provide a STARTFILE_SPEC appropriate for Linux. Here we add
- the Linux magical crtbegin.o file (see crtstuff.c) which
- provides part of the support for getting C++ file-scope static
- object constructed before entering `main'. */
+ Don't bother seeing crtstuff.c -- there is absolutely no hope of
+ getting that file to understand multiple GPs. GNU Libc provides a
+ hand-coded version that is used on Linux; it could be copied here
+ if there is ever a need. */
#undef STARTFILE_SPEC
#define STARTFILE_SPEC \
"%{!shared: \
%{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
- crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+ crti.o%s crtbegin.o%s"
-/* Provide a ENDFILE_SPEC appropriate for Linux. Here we tack on
- the Linux magical crtend.o file (see crtstuff.c) which
- provides part of the support for getting C++ file-scope static
- object constructed before entering `main', followed by a normal
- Linux "finalizer" file, `crtn.o'. */
+/* Provide an ENDFILE_SPEC appropriate for ELF. Here we tack on the
+ magical crtend.o file which provides part of the support for
+ getting C++ file-scope static objects constructed before entering
+ `main', followed by a normal ELF "finalizer" file, `crtn.o'. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC \
- "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+ "crtend.o%s crtn.o%s"
+
+/* We support #pragma. */
+#define HANDLE_SYSV_PRAGMA
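
[Editor's note: the elf.h change above collapses three nearly identical section-switching functions into one SECTION_FUNCTION_TEMPLATE and sends small initialized objects (size <= g_switch_value, the -G threshold) to .sdata instead of .data. A minimal stand-alone sketch of that pattern, with hypothetical stand-ins for the GCC globals (asm_out_file, in_section, g_switch_value) and simplified section directives:]

#include <stdio.h>

/* Toy stand-ins for the GCC globals used by the macros above.  */
enum section { in_none, in_ctors, in_sdata };
static enum section in_section = in_none;
static FILE *asm_out_file;            /* plays the role of the .s output */
static int g_switch_value = 8;        /* -G <n>: small-data size threshold */

#define CTORS_SECTION_ASM_OP "\t.section\t.ctors"
#define SDATA_SECTION_ASM_OP "\t.section\t.sdata"

/* Same shape as SECTION_FUNCTION_TEMPLATE: emit the section directive
   only when the current section actually changes.  */
#define SECTION_FUNCTION_TEMPLATE(FN, ENUM, OP) \
void FN (void)                                  \
{                                               \
  if (in_section != ENUM)                       \
    {                                           \
      fprintf (asm_out_file, "%s\n", OP);       \
      in_section = ENUM;                        \
    }                                           \
}

SECTION_FUNCTION_TEMPLATE (ctors_section, in_ctors, CTORS_SECTION_ASM_OP)
SECTION_FUNCTION_TEMPLATE (sdata_section, in_sdata, SDATA_SECTION_ASM_OP)

int
main (void)
{
  asm_out_file = stdout;

  int size = 4;                 /* pretend int_size_in_bytes () returned 4 */
  if (size >= 0 && size <= g_switch_value)
    sdata_section ();           /* small data: reachable off the GP */
  ctors_section ();
  ctors_section ();             /* no second directive: already in .ctors */
  return 0;
}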
diff --git a/contrib/gcc/config/alpha/linux-ecoff.h b/contrib/gcc/config/alpha/linux-ecoff.h
new file mode 100644
index 0000000..a6cd5b2
--- /dev/null
+++ b/contrib/gcc/config/alpha/linux-ecoff.h
@@ -0,0 +1,36 @@
+/* Definitions of target machine for GNU compiler
+ for Alpha Linux-based GNU systems using ECOFF.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Bob Manson.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha GNU/Linux for ECOFF)");
+
+#undef SUB_CPP_PREDEFINES
+#define SUB_CPP_PREDEFINES "-D__ECOFF__"
+
+#undef LINK_SPEC
+#define LINK_SPEC "-G 8 %{O*:-O3} %{!O*:-O1}"
+
+/* stabs get slurped by the assembler into a queer ecoff format. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* We support #pragma. */
+#define HANDLE_SYSV_PRAGMA
diff --git a/contrib/gcc/config/alpha/linux-elf.h b/contrib/gcc/config/alpha/linux-elf.h
new file mode 100644
index 0000000..90009f1
--- /dev/null
+++ b/contrib/gcc/config/alpha/linux-elf.h
@@ -0,0 +1,47 @@
+/* Definitions of target machine for GNU compiler
+ for Alpha Linux-based GNU systems using ELF.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Richard Henderson.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha GNU/Linux for ELF)");
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+{ "elf_dynamic_linker", ELF_DYNAMIC_LINKER },
+
+#undef SUB_CPP_PREDEFINES
+#define SUB_CPP_PREDEFINES "-D__ELF__"
+
+#ifdef USE_GNULIBC_1
+#define ELF_DYNAMIC_LINKER "/lib/ld.so.1"
+#else
+#define ELF_DYNAMIC_LINKER "/lib/ld-linux.so.2"
+#endif
+
+#ifndef USE_GNULIBC_1
+#undef DEFAULT_VTABLE_THUNKS
+#define DEFAULT_VTABLE_THUNKS 1
+#endif
+
+#ifndef USE_GNULIBC_1
+#undef LIB_SPEC
+#define LIB_SPEC \
+"%{shared:-lc}%{!shared:%{pthread:-lpthread }%{profile:-lc_p}%{!profile:-lc}} "
+#endif
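
[Editor's note: LIB_SPEC and the other *_SPEC strings above use the driver's spec language, where %{flag:text} substitutes text only when -flag was given and %{!flag:text} only when it was not. The sketch below is a deliberately tiny, hypothetical model of just that subset -- the real gcc driver also handles %{O*:...}, %s suffixes and many other forms. Run it as ./a.out -pthread to see which libraries the LIB_SPEC above would select.]

#include <stdio.h>
#include <string.h>

/* Return nonzero if "-NAME" (NAME having length LEN) was passed.  */
static int
have_flag (const char *name, size_t len, int argc, char **argv)
{
  for (int i = 1; i < argc; i++)
    if (strlen (argv[i]) == len + 1
        && argv[i][0] == '-'
        && strncmp (argv[i] + 1, name, len) == 0)
      return 1;
  return 0;
}

/* Expand SPEC up to the next unmatched '}' or end of string.  If EMIT is
   zero, parse but print nothing (used for the branch not taken).  */
static const char *
expand (const char *spec, int emit, int argc, char **argv)
{
  while (*spec && *spec != '}')
    {
      if (spec[0] == '%' && spec[1] == '{')
        {
          spec += 2;
          int negate = (*spec == '!');
          if (negate)
            spec++;
          const char *name = spec;
          while (*spec && *spec != ':' && *spec != '}')
            spec++;
          int match = have_flag (name, (size_t) (spec - name), argc, argv);
          if (negate)
            match = !match;
          if (*spec == ':')
            spec = expand (spec + 1, emit && match, argc, argv);
          if (*spec == '}')
            spec++;
        }
      else
        {
          if (emit)
            putchar (*spec);
          spec++;
        }
    }
  return spec;
}

int
main (int argc, char **argv)
{
  const char *lib_spec =
    "%{shared:-lc}%{!shared:%{pthread:-lpthread }"
    "%{profile:-lc_p}%{!profile:-lc}} ";

  expand (lib_spec, 1, argc, argv);
  putchar ('\n');
  return 0;
}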
diff --git a/contrib/gcc/config/alpha/linux.h b/contrib/gcc/config/alpha/linux.h
new file mode 100644
index 0000000..3791c89
--- /dev/null
+++ b/contrib/gcc/config/alpha/linux.h
@@ -0,0 +1,44 @@
+/* Definitions of target machine for GNU compiler,
+ for Alpha Linux-based GNU systems.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Richard Henderson.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FP | MASK_FPREGS | MASK_GAS)
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dlinux -Dunix -Asystem(linux) -D_LONGLONG -D__alpha__ " SUB_CPP_PREDEFINES
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-lgmon} %{pg:-lc_p} %{!pg:-lc}"
+
+/* Generate calls to memcpy, etc., not bcopy, etc. */
+#define TARGET_MEM_FUNCTIONS 1
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fputs ("\tlda $28,_mcount\n\tjsr $28,($28),_mcount\n", (FILE))
+
+/* Show that we need a GP when profiling. */
+#define TARGET_PROFILING_NEEDS_GP 1
+
+/* Don't care about faults in the prologue. */
+#undef TARGET_CAN_FAULT_IN_PROLOGUE
+#define TARGET_CAN_FAULT_IN_PROLOGUE 1
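
[Editor's note: CPP_PREDEFINES above is assembled by plain C string-literal concatenation: linux.h supplies the common -D/-A options and the subtarget header appends its own through SUB_CPP_PREDEFINES, defined above as -D__ECOFF__ or -D__ELF__. A trivial stand-alone illustration of the pasting:]

#include <stdio.h>

/* Demo only: pick one of the subtarget definitions seen above.  */
#define SUB_CPP_PREDEFINES "-D__ELF__"

#define CPP_PREDEFINES \
  "-Dlinux -Dunix -Asystem(linux) -D_LONGLONG -D__alpha__ " SUB_CPP_PREDEFINES

int
main (void)
{
  /* Adjacent string literals concatenate into a single option string.  */
  puts (CPP_PREDEFINES);
  return 0;
}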
diff --git a/contrib/gcc/config/alpha/netbsd-elf.h b/contrib/gcc/config/alpha/netbsd-elf.h
new file mode 100644
index 0000000..17d7bb0
--- /dev/null
+++ b/contrib/gcc/config/alpha/netbsd-elf.h
@@ -0,0 +1,31 @@
+/* Definitions of target machine for GNU compiler
+ for Alpha NetBSD systems using ELF.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (Alpha NetBSD/ELF)");
+
+#undef SUB_CPP_PREDEFINES
+#define SUB_CPP_PREDEFINES "-D__ELF__"
+
+#undef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS \
+{ "elf_dynamic_linker", ELF_DYNAMIC_LINKER },
+
+#define ELF_DYNAMIC_LINKER "/usr/libexec/ld.elf_so"
diff --git a/contrib/gcc/config/alpha/netbsd.h b/contrib/gcc/config/alpha/netbsd.h
new file mode 100644
index 0000000..054e9e0
--- /dev/null
+++ b/contrib/gcc/config/alpha/netbsd.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler,
+ for Alpha NetBSD systems.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FP | MASK_FPREGS | MASK_GAS)
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D_LONGLONG -Dnetbsd -Dunix " SUB_CPP_PREDEFINES
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{pg:-lgmon} %{pg:-lc_p} %{!pg:-lc}"
+
+/* Generate calls to memcpy, etc., not bcopy, etc. */
+#define TARGET_MEM_FUNCTIONS
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fputs ("\tlda $28,_mcount\n\tjsr $28,($28),_mcount\n", (FILE))
+
+/* Show that we need a GP when profiling. */
+#define TARGET_PROFILING_NEEDS_GP
diff --git a/contrib/gcc/config/alpha/openbsd.h b/contrib/gcc/config/alpha/openbsd.h
new file mode 100644
index 0000000..60591d5
--- /dev/null
+++ b/contrib/gcc/config/alpha/openbsd.h
@@ -0,0 +1,126 @@
+/* Configuration file for an alpha OpenBSD target.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* We settle for little endian for now. */
+#define TARGET_ENDIAN_DEFAULT 0
+
+#include <alpha/alpha.h>
+
+#define OBSD_NO_DYNAMIC_LIBRARIES
+#define OBSD_HAS_DECLARE_FUNCTION_NAME
+#define OBSD_HAS_DECLARE_FUNCTION_SIZE
+#define OBSD_HAS_DECLARE_OBJECT
+
+/* alpha ecoff supports only weak aliases, see below. */
+#define ASM_WEAKEN_LABEL(FILE,NAME) ASM_OUTPUT_WEAK_ALIAS (FILE,NAME,0)
+
+#include <openbsd.h>
+
+/* Controlling the compilation driver. */
+
+/* alpha needs __start. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e __start}}} -dc -dp %{assert*}"
+
+/* run-time target specifications */
+#define CPP_PREDEFINES "-D__unix__ -D__ANSI_COMPAT -Asystem(unix) \
+-D__OpenBSD__ -D__alpha__ -D__alpha"
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "long unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "long int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#define LOCAL_LABEL_PREFIX "."
+
+/* We don't have an init section yet. */
+#undef HAS_INIT_SECTION
+
+/* collect2 support (assembler format: macros for initialization). */
+
+/* Don't tell collect2 we use COFF, as we don't have (yet?) a dynamic ld
+ library with the proper functions to handle this, so collect2 will
+ default to using nm. */
+#undef OBJECT_FORMAT_COFF
+#undef EXTENDED_COFF
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. egcs doesn't try too hard to check internal
+ configuration files... */
+#ifdef INCOMING_RETURN_ADDR_RTX
+#undef DWARF2_UNWIND_INFO
+#define DWARF2_UNWIND_INFO 0
+#endif
+
+/* Assembler format: file framework. */
+
+/* Taken from alpha/osf.h. This used to be common to all alpha
+ configurations, but elf has departed from it.
+ Check alpha/alpha.h, alpha/osf.h for it when egcs is upgraded. */
+#ifndef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ fprintf (FILE, "\t.set volatile\n"); \
+ fprintf (FILE, "\t.set noat\n"); \
+ if (TARGET_SUPPORT_ARCH) \
+ fprintf (FILE, "\t.arch %s\n", \
+ alpha_cpu == PROCESSOR_EV6 ? "ev6" \
+ : (alpha_cpu == PROCESSOR_EV5 \
+ ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5") \
+ : "ev4")); \
+ \
+ ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
+}
+#endif
+
+/* Assembler format: label output. */
+
+#define ASM_OUTPUT_WEAK_ALIAS(FILE,NAME,VALUE) \
+ do { \
+ fputs ("\t.weakext\t", FILE); \
+ assemble_name (FILE, NAME); \
+ if (VALUE) \
+ { \
+ fputs (" , ", FILE); \
+ assemble_name (FILE, VALUE); \
+ } \
+ fputc ('\n', FILE); \
+ } while (0)
+
+
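
[Editor's note: ASM_OUTPUT_WEAK_ALIAS above wraps the ECOFF/GAS .weakext directive: with one operand it merely marks a symbol weak, with two it makes the first a weak alias for the second. A stand-alone sketch writing the same directive to stdout; the symbol names are made up, and GCC's assemble_name would also apply any user-label prefix:]

#include <stdio.h>

/* Emit a .weakext directive for NAME, optionally aliasing it to VALUE.  */
static void
output_weak_alias (FILE *file, const char *name, const char *value)
{
  fputs ("\t.weakext\t", file);
  fputs (name, file);
  if (value)
    {
      fputs (" , ", file);
      fputs (value, file);
    }
  fputc ('\n', file);
}

int
main (void)
{
  output_weak_alias (stdout, "old_api", "new_api");   /* weak alias */
  output_weak_alias (stdout, "optional_hook", NULL);  /* plain weak symbol */
  return 0;
}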
diff --git a/contrib/gcc/config/alpha/osf.h b/contrib/gcc/config/alpha/osf.h
new file mode 100644
index 0000000..956961f
--- /dev/null
+++ b/contrib/gcc/config/alpha/osf.h
@@ -0,0 +1,127 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha on OSF/1.
+ Copyright (C) 1992, 93, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* As of OSF 4.0, as can subtract adjacent labels. */
+
+#undef TARGET_AS_CAN_SUBTRACT_LABELS
+#define TARGET_AS_CAN_SUBTRACT_LABELS 1
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#define CPP_PREDEFINES "\
+-Dunix -D__osf__ -D_LONGLONG -DSYSTYPE_BSD \
+-D_SYSTYPE_BSD -Asystem(unix) -Asystem(xpg4)"
+
+/* Under OSF4, -p and -pg require -lprof1, and -lprof1 requires -lpdf. */
+
+#define LIB_SPEC "%{p:-lprof1 -lpdf} %{pg:-lprof1 -lpdf} %{a:-lprof2} -lc"
+
+/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
+ optimizing, -O1 if we are not. Pass -shared, -non_shared or
+ -call_shared as appropriate. Also pass -pg. */
+#define LINK_SPEC \
+ "-G 8 %{O*:-O3} %{!O*:-O1} %{static:-non_shared} \
+ %{!static:%{shared:-shared} %{!shared:-call_shared}} %{pg} %{taso} \
+ %{rpath*}"
+
+#define STARTFILE_SPEC \
+ "%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}}"
+
+#define MD_STARTFILE_PREFIX "/usr/lib/cmplrs/cc/"
+
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ fprintf (FILE, "\t.set volatile\n"); \
+ fprintf (FILE, "\t.set noat\n"); \
+ if (TARGET_SUPPORT_ARCH) \
+ fprintf (FILE, "\t.arch %s\n", \
+ alpha_cpu == PROCESSOR_EV6 ? "ev6" \
+ : (alpha_cpu == PROCESSOR_EV5 \
+ ? (TARGET_MAX ? "pca56" : TARGET_BWX ? "ev56" : "ev5") \
+ : "ev4")); \
+ \
+ ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
+}
+
+/* No point in running CPP on our assembler output. */
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+/* Don't pass -g to GNU as, because some versions don't accept this option. */
+#define ASM_SPEC "%{malpha-as:-g} -nocpp %{pg}"
+#else
+/* In OSF/1 v3.2c, the assembler by default does not output file names, which
+ causes mips-tfile to fail. Passing -g to the assembler fixes this problem.
+ ??? Strictly speaking, we need -g only if the user specifies -g. Passing
+ it always means that we get slightly larger than necessary object files
+ if the user does not specify -g. If we don't pass -g, then mips-tfile
+ will need to be fixed to work in this case. Pass -O0 since some
+ optimizations are broken and don't help us anyway. */
+#define ASM_SPEC "%{!mgas:-g} -nocpp %{pg} -O0"
+#endif
+
+/* Specify to run a post-processor, mips-tfile after the assembler
+ has run to stuff the ecoff debug information into the object file.
+ This is needed because the Alpha assembler provides no way
+ of specifying such information in the assembly file. */
+
+#if ((TARGET_DEFAULT | TARGET_CPU_DEFAULT) & MASK_GAS) != 0
+
+#define ASM_FINAL_SPEC "\
+%{malpha-as: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#else
+#define ASM_FINAL_SPEC "\
+%{!mgas: %{!mno-mips-tfile: \
+ \n mips-tfile %{v*: -v} \
+ %{K: -I %b.o~} \
+ %{!K: %{save-temps: -I %b.o~}} \
+ %{c:%W{o*}%{!o*:-o %b.o}}%{!c:-o %U.o} \
+ %{.s:%i} %{!.s:%g.s}}}"
+
+#endif
+
+/* Indicate that we have a stamp.h to use. */
+#ifndef CROSS_COMPILE
+#define HAVE_STAMP_H 1
+#endif
+
+/* Attempt to turn on access permissions for the stack. */
+
+#define TRANSFER_FROM_TRAMPOLINE \
+void \
+__enable_execute_stack (addr) \
+ void *addr; \
+{ \
+ long size = getpagesize (); \
+ long mask = ~(size-1); \
+ char *page = (char *) (((long) addr) & mask); \
+ char *end = (char *) ((((long) (addr + TRAMPOLINE_SIZE)) & mask) + size); \
+ \
+ /* 7 is PROT_READ | PROT_WRITE | PROT_EXEC */ \
+ if (mprotect (page, end - page, 7) < 0) \
+ perror ("mprotect of trampoline code"); \
+}
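
[Editor's note: TRANSFER_FROM_TRAMPOLINE above rounds the trampoline's address range out to whole pages and mprotects it read/write/execute so the generated code can run. A self-contained sketch of the same rounding arithmetic, assuming a POSIX host and using a fresh anonymous mapping in place of a real stack trampoline; hardened kernels may refuse the RWX request:]

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define TRAMPOLINE_SIZE 32   /* matches the Alpha value used in these files */

/* Cover every page touched by the TRAMPOLINE_SIZE bytes at ADDR, then ask
   for read/write/execute on that range, as the macro above does.  */
static void
enable_execute (void *addr)
{
  long size = sysconf (_SC_PAGESIZE);
  long mask = ~(size - 1);
  char *page = (char *) (((long) addr) & mask);
  char *end  = (char *) ((((long) ((char *) addr + TRAMPOLINE_SIZE)) & mask) + size);

  if (mprotect (page, (size_t) (end - page), PROT_READ | PROT_WRITE | PROT_EXEC) < 0)
    perror ("mprotect of trampoline code");
  else
    printf ("made %ld byte(s) at %p executable\n",
            (long) (end - page), (void *) page);
}

int
main (void)
{
  void *tramp = mmap (NULL, 4096, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (tramp == MAP_FAILED)
    {
      perror ("mmap");
      return 1;
    }
  memset (tramp, 0, TRAMPOLINE_SIZE);   /* stand-in for trampoline bytes */
  enable_execute (tramp);
  return 0;
}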
diff --git a/contrib/gcc/config/alpha/osf12.h b/contrib/gcc/config/alpha/osf12.h
index fe9112c..87e2111 100644
--- a/contrib/gcc/config/alpha/osf12.h
+++ b/contrib/gcc/config/alpha/osf12.h
@@ -1,5 +1,5 @@
/* Definitions of target machine for GNU compiler, for DEC Alpha.
- Copyright (C) 1992, 1993, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1993, 1995, 1996 Free Software Foundation, Inc.
Contributed by Richard Kenner (kenner@nyu.edu)
This file is part of GNU CC.
@@ -19,9 +19,6 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-
-#include "alpha/alpha.h"
-
/* In OSF 1.2, there is a linker bug that prevents use of -O3 to
the linker. */
@@ -29,3 +26,8 @@ Boston, MA 02111-1307, USA. */
#define LINK_SPEC \
"-G 8 -O1 %{static:-non_shared} %{rpath*} \
%{!static:%{shared:-shared} %{!shared:-call_shared}} %{taso}"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
diff --git a/contrib/gcc/config/alpha/osf2or3.h b/contrib/gcc/config/alpha/osf2or3.h
new file mode 100644
index 0000000..5abdb0e
--- /dev/null
+++ b/contrib/gcc/config/alpha/osf2or3.h
@@ -0,0 +1,30 @@
+/* Definitions of target machine for GNU compiler, for DEC Alpha, osf[23].
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* In OSF 2 or 3, linking with -lprof1 doesn't require -lpdf. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{p:-lprof1} %{pg:-lprof1} %{a:-lprof2} -lc"
+
+/* As of OSF 3.2, as still can't subtract adjacent labels. */
+
+#undef TARGET_AS_CAN_SUBTRACT_LABELS
+#define TARGET_AS_CAN_SUBTRACT_LABELS 0
+
diff --git a/contrib/gcc/config/alpha/t-crtbe b/contrib/gcc/config/alpha/t-crtbe
new file mode 100644
index 0000000..5e82b92
--- /dev/null
+++ b/contrib/gcc/config/alpha/t-crtbe
@@ -0,0 +1,9 @@
+# Effectively disable the crtbegin/end rules using crtstuff.c
+T = disable
+
+# Assemble startup files.
+crtbegin.o: $(srcdir)/config/alpha/crtbegin.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtbegin.o -x assembler $(srcdir)/config/alpha/crtbegin.asm
+
+crtend.o: $(srcdir)/config/alpha/crtend.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtend.o -x assembler $(srcdir)/config/alpha/crtend.asm
diff --git a/contrib/gcc/config/alpha/t-vms b/contrib/gcc/config/alpha/t-vms
new file mode 100644
index 0000000..12ac240
--- /dev/null
+++ b/contrib/gcc/config/alpha/t-vms
@@ -0,0 +1,6 @@
+# Do not build libgcc1.
+LIBGCC1 =
+CROSS_LIBGCC1 =
+
+LIB2FUNCS_EXTRA = tramp.s
+
diff --git a/contrib/gcc/config/alpha/va_list.h b/contrib/gcc/config/alpha/va_list.h
new file mode 100644
index 0000000..c9ab2b0
--- /dev/null
+++ b/contrib/gcc/config/alpha/va_list.h
@@ -0,0 +1,16 @@
+/* A replacement for Digital Unix's <va_list.h>. */
+
+#include <va-alpha.h>
+
+#if !defined(_VA_LIST) && !defined(_HIDDEN_VA_LIST)
+#define _VA_LIST
+typedef __gnuc_va_list va_list;
+
+#elif defined(_HIDDEN_VA_LIST) && !defined(_HIDDEN_VA_LIST_DONE)
+#define _HIDDEN_VA_LIST_DONE
+typedef __gnuc_va_list __va_list;
+
+#elif defined(_HIDDEN_VA_LIST) && defined(_VA_LIST)
+#undef _HIDDEN_VA_LIST
+
+#endif
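
[Editor's note: the preprocessor guards above let <va_list.h> serve two callers: ordinary code that wants the public va_list, and system headers that only want the hidden __va_list. A hypothetical stand-alone demo of which branch fires when the hidden form is requested before the public one:]

#include <stdio.h>

/* Pretend a system header asked only for the hidden type before this
   point, the way Digital Unix's headers do.  */
#define _HIDDEN_VA_LIST

#if !defined(_VA_LIST) && !defined(_HIDDEN_VA_LIST)
#define RESULT "public va_list would be defined"
#elif defined(_HIDDEN_VA_LIST) && !defined(_HIDDEN_VA_LIST_DONE)
#define _HIDDEN_VA_LIST_DONE
#define RESULT "hidden __va_list would be defined"
#elif defined(_HIDDEN_VA_LIST) && defined(_VA_LIST)
#undef _HIDDEN_VA_LIST
#define RESULT "both already seen; hidden request cancelled"
#else
#define RESULT "nothing to do"
#endif

int
main (void)
{
  puts (RESULT);
  return 0;
}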
diff --git a/contrib/gcc/config/alpha/vms-tramp.asm b/contrib/gcc/config/alpha/vms-tramp.asm
new file mode 100644
index 0000000..fce9ec5
--- /dev/null
+++ b/contrib/gcc/config/alpha/vms-tramp.asm
@@ -0,0 +1,22 @@
+;# New Alpha OpenVMS trampoline
+;#
+ .set noreorder
+ .set volatile
+ .set noat
+ .file 1 "tramp.s"
+.text
+ .align 3
+ .globl __tramp
+ .ent __tramp
+__tramp..en:
+
+.link
+ .align 3
+__tramp:
+ .pdesc __tramp..en,null
+.text
+ ldq $1,24($27)
+ ldq $27,16($27)
+ ldq $28,8($27)
+ jmp $31,($28),0
+ .end __tramp
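
[Editor's note: reading the three ldq instructions together with INITIALIZE_TRAMPOLINE in vms.h below (offsets 16 and 24), the 32-byte trampoline appears to hold the target's procedure value at offset 16 and the static-chain value at offset 24; __tramp reloads them and then fetches the real entry address from offset 8 of the target's procedure descriptor. A small C sketch of that assumed layout; the numeric values are invented:]

#include <stdint.h>
#include <stdio.h>

/* Assumed layout: four quadwords, of which word 2 (offset 16) is the
   target procedure value and word 3 (offset 24) is the static chain,
   matching alpha_initialize_trampoline (..., 16, 24, -1) in vms.h.  */
struct vms_trampoline
{
  uint64_t words[4];
};

int
main (void)
{
  struct vms_trampoline t = { { 0, 0, 0x20000abcULL, 0x140001000ULL } };

  uint64_t static_chain = t.words[3];   /* ldq $1,24($27)  */
  uint64_t proc_value   = t.words[2];   /* ldq $27,16($27) */

  printf ("static chain    = 0x%llx\n", (unsigned long long) static_chain);
  printf ("procedure value = 0x%llx\n", (unsigned long long) proc_value);
  /* The real trampoline next loads the entry address from 8(proc_value)
     -- ldq $28,8($27) -- and jumps there; that needs a live procedure
     descriptor, so the demo stops here.  */
  return 0;
}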
diff --git a/contrib/gcc/config/alpha/vms.h b/contrib/gcc/config/alpha/vms.h
new file mode 100644
index 0000000..44cf5bf
--- /dev/null
+++ b/contrib/gcc/config/alpha/vms.h
@@ -0,0 +1,510 @@
+/* Output variables, constants and external declarations, for GNU compiler.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define OPEN_VMS 1
+
+/* This enables certain macros in alpha.h, which will make an indirect
+ reference to an external symbol an invalid address. This needs to be
+ defined before we include alpha.h, since it determines which macros
+ are used for GO_IF_*. */
+
+#define NO_EXTERNAL_INDIRECT_ADDRESS
+
+#include "alpha/alpha.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-D__ALPHA -Dvms -DVMS -D__vms__ -D__VMS__ -Asystem(vms)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{mfloat-ieee:-D__IEEE_FLOAT} \
+%{mfloat-vax:-D__G_FLOAT} \
+%{!mfloat-vax:-D__IEEE_FLOAT}"
+
+/* Under OSF4, -p and -pg require -lprof1, and -lprof1 requires -lpdf. */
+
+#define LIB_SPEC "%{p:-lprof1 -lpdf} %{pg:-lprof1 -lpdf} %{a:-lprof2} -lc"
+
+/* Pass "-G 8" to ld because Alpha's CC does. Pass -O3 if we are
+ optimizing, -O1 if we are not. Pass -shared, -non_shared or
+ -call_shared as appropriate. Also pass -pg. */
+#define LINK_SPEC \
+ "-G 8 %{O*:-O3} %{!O*:-O1} %{static:-non_shared} \
+ %{!static:%{shared:-shared} %{!shared:-call_shared}} %{pg} %{taso} \
+ %{rpath*}"
+
+/* We allow $'s in identifiers unless -ansi is used. */
+
+#define DOLLARS_IN_IDENTIFIERS 2
+
+/* These match the definitions used in DECCRTL, the VMS C run-time library
+
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+*/
+
+/* Use memcpy for structure copying, and so forth. */
+#define TARGET_MEM_FUNCTIONS
+
+/* By default, allow $ to be part of an identifier. */
+#define DOLLARS_IN_IDENTIFIERS 2
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_FP|MASK_FPREGS|MASK_GAS)
+#undef TARGET_OPEN_VMS
+#define TARGET_OPEN_VMS 1
+
+#undef TARGET_NAME
+#define TARGET_NAME "OpenVMS/Alpha"
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (%s)", TARGET_NAME);
+
+/* The structure return address arrives as an "argument" on VMS. */
+#undef STRUCT_VALUE_REGNUM
+#define STRUCT_VALUE 0
+#undef PCC_STATIC_STRUCT_RETURN
+
+/* no floating emulation. */
+#undef REAL_ARITHMETIC
+
+/* "long" is 32 bits. */
+#undef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE 32
+
+/* Pointer is 32 bits but the hardware has 64-bit addresses, sign extended. */
+#undef POINTER_SIZE
+#define POINTER_SIZE 32
+#define POINTERS_EXTEND_UNSIGNED 0
+
+#define MAX_OFILE_ALIGNMENT 524288 /* 8 x 2^16 by DEC Ada Test CD40VRA */
+
+#undef FIXED_REGISTERS
+#define FIXED_REGISTERS \
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 }
+
+#undef CALL_USED_REGISTERS
+#define CALL_USED_REGISTERS \
+ {1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 }
+
+#undef HARD_FRAME_POINTER_REGNUM
+#define HARD_FRAME_POINTER_REGNUM 29
+
+#undef CAN_ELIMINATE
+#define CAN_ELIMINATE(FROM, TO) \
+((TO) != STACK_POINTER_REGNUM || ! alpha_using_fp ())
+
+#undef INITIAL_ELIMINATION_OFFSET
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+{ if ((FROM) == FRAME_POINTER_REGNUM) \
+ (OFFSET) = alpha_sa_size () + alpha_pv_save_size (); \
+ else if ((FROM) == ARG_POINTER_REGNUM) \
+ (OFFSET) = (ALPHA_ROUND (alpha_sa_size () + alpha_pv_save_size () \
+ + get_frame_size () \
+ + current_function_pretend_args_size) \
+ - current_function_pretend_args_size); \
+ if ((TO) == STACK_POINTER_REGNUM) \
+ (OFFSET) += ALPHA_ROUND (current_function_outgoing_args_size); \
+}
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On Alpha/VMS, this is a structure that contains the number of
+ arguments and, for each argument, the datatype of that argument.
+
+ The number of arguments is the number of words of arguments scanned so far.
+ Thus 6 or more means all following args should go on the stack. */
+
+enum avms_arg_type {I64, FF, FD, FG, FS, FT};
+typedef struct {char num_args; enum avms_arg_type atypes[6];} avms_arg_info;
+
+#undef CUMULATIVE_ARGS
+#define CUMULATIVE_ARGS avms_arg_info
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+#undef INIT_CUMULATIVE_ARGS
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
+ (CUM).num_args = 0; \
+ (CUM).atypes[0] = (CUM).atypes[1] = (CUM).atypes[2] = I64; \
+ (CUM).atypes[3] = (CUM).atypes[4] = (CUM).atypes[5] = I64;
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ (TYPE is null for libcalls where that information may not be available.) */
+
+extern enum avms_arg_type alpha_arg_type ();
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode (or VOIDmode for no more args).
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+
+ On Alpha the first 6 words of args are normally in registers
+ and the rest are pushed. */
+
+extern struct rtx_def *alpha_arg_info_reg_val ();
+#undef FUNCTION_ARG
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+((MODE) == VOIDmode ? alpha_arg_info_reg_val (CUM) \
+ : ((CUM.num_args) < 6 && ! MUST_PASS_IN_STACK (MODE, TYPE) \
+ ? gen_rtx(REG, (MODE), \
+ ((CUM).num_args + 16 \
+ + ((TARGET_FPREGS \
+ && (GET_MODE_CLASS (MODE) == MODE_COMPLEX_FLOAT \
+ || GET_MODE_CLASS (MODE) == MODE_FLOAT)) \
+ * 32))) \
+ : 0))
+
+#undef FUNCTION_ARG_ADVANCE
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+ if (MUST_PASS_IN_STACK (MODE, TYPE)) \
+ (CUM).num_args += 6; \
+ else \
+ { \
+ if ((CUM).num_args < 6) \
+ (CUM).atypes[(CUM).num_args] = alpha_arg_type (MODE); \
+ \
+ (CUM).num_args += ALPHA_ARG_SIZE (MODE, TYPE, NAMED); \
+ }
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+#undef FUNCTION_ARG_PARTIAL_NREGS
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+((CUM).num_args < 6 && 6 < (CUM).num_args \
+ + ALPHA_ARG_SIZE (MODE, TYPE, NAMED) \
+ ? 6 - (CUM).num_args : 0)
+
+/* Perform any actions needed for a function that is receiving a
+ variable number of arguments.
+
+ CUM is as for INIT_CUMULATIVE_ARGS.
+
+ MODE and TYPE are the mode and type of the current parameter.
+
+ PRETEND_SIZE is a variable that should be set to the amount of stack
+ that must be pushed by the prolog to pretend that our caller pushed
+ it.
+
+ Normally, this macro will push all remaining incoming registers on the
+ stack and set PRETEND_SIZE to the length of the registers pushed.
+
+ For VMS, we allocate space for all 6 arg registers plus a count.
+
+ However, if NO registers need to be saved, don't allocate any space.
+ This is not only because we won't need the space, but because AP includes
+ the current_pretend_args_size and we don't want to mess up any
+ ap-relative addresses already made. */
+
+#undef SETUP_INCOMING_VARARGS
+#define SETUP_INCOMING_VARARGS(CUM,MODE,TYPE,PRETEND_SIZE,NO_RTL) \
+{ if ((CUM).num_args < 6) \
+ { \
+ if (! (NO_RTL)) \
+ { \
+ emit_move_insn (gen_rtx (REG, DImode, 1), \
+ virtual_incoming_args_rtx); \
+ emit_insn (gen_arg_home ()); \
+ } \
+ \
+ PRETEND_SIZE = 7 * UNITS_PER_WORD; \
+ } \
+}
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+{ \
+ alpha_write_verstamp (FILE); \
+ fprintf (FILE, "\t.set noreorder\n"); \
+ fprintf (FILE, "\t.set volatile\n"); \
+ ASM_OUTPUT_SOURCE_FILENAME (FILE, main_input_filename); \
+}
+
+#undef ASM_OUTPUT_FLOAT
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ { \
+ if (REAL_VALUE_ISINF (VALUE) \
+ || REAL_VALUE_ISNAN (VALUE) \
+ || REAL_VALUE_MINUS_ZERO (VALUE)) \
+ { \
+ long t; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
+ fprintf (FILE, "\t.long 0x%lx\n", t & 0xffffffff); \
+ } \
+ else \
+ { \
+ char str[30]; \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t.%c_floating %s\n", (TARGET_FLOAT_VAX)?'f':'s', str); \
+ } \
+ }
+
+#define LINK_SECTION_ASM_OP ".link"
+#define READONLY_SECTION_ASM_OP ".rdata"
+#define LITERALS_SECTION_ASM_OP ".literals"
+#define CTORS_SECTION_ASM_OP ".ctors"
+#define DTORS_SECTION_ASM_OP ".dtors"
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_link, in_rdata, in_literals, in_ctors, in_dtors
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+void \
+readonly_section () \
+{ \
+ if (in_section != in_rdata) \
+ { \
+ fprintf (asm_out_file, "%s\n", READONLY_SECTION_ASM_OP); \
+ in_section = in_rdata; \
+ } \
+} \
+void \
+link_section () \
+{ \
+ if (in_section != in_link) \
+ { \
+ fprintf (asm_out_file, "%s\n", LINK_SECTION_ASM_OP); \
+ in_section = in_link; \
+ } \
+} \
+void \
+literals_section () \
+{ \
+ if (in_section != in_literals) \
+ { \
+ fprintf (asm_out_file, "%s\n", LITERALS_SECTION_ASM_OP); \
+ in_section = in_literals; \
+ } \
+} \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+} \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) abort ()
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.quad $L%d\n", (VALUE))
+
+#undef READONLY_DATA_SECTION
+#define READONLY_DATA_SECTION readonly_section
+
+#define ASM_FILE_END(FILE) alpha_write_linkage (FILE);
+
+#undef CASE_VECTOR_MODE
+#define CASE_VECTOR_MODE DImode
+#undef CASE_VECTOR_PC_RELATIVE
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,TABLEINSN) \
+{ ASM_OUTPUT_ALIGN (FILE, 3); ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); }
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. */
+
+#define COMMON_ASM_OP ".comm"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "\t%s\t", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+} while (0)
+
+#define NO_MD_PROTOTYPES
+
+/* Output assembler code for a block containing the constant parts
+ of a trampoline, leaving space for the variable parts.
+
+ The trampoline should set the static chain pointer to value placed
+ into the trampoline and should branch to the specified routine.
+ Note that $27 has been set to the address of the trampoline, so we can
+ use it for addressability of the two data items. Trampolines are always
+ aligned to FUNCTION_BOUNDARY, which is 64 bits. */
+
+#undef TRAMPOLINE_TEMPLATE
+#define TRAMPOLINE_TEMPLATE(FILE) \
+{ \
+ fprintf (FILE, "\t.quad 0\n"); \
+ fprintf (FILE, "\t.linkage __tramp\n"); \
+ fprintf (FILE, "\t.quad 0\n"); \
+}
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#undef TRAMPOLINE_SIZE
+#define TRAMPOLINE_SIZE 32
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+#undef INITIALIZE_TRAMPOLINE
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ alpha_initialize_trampoline (TRAMP, FNADDR, CXT, 16, 24, -1)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t.quad "); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t.quad "); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+#define VALID_MACHINE_DECL_ATTRIBUTE(DECL, ATTRIBUTES, NAME, ARGS) \
+ (vms_valid_decl_attribute_p (DECL, ATTRIBUTES, NAME, ARGS))
+extern int vms_valid_decl_attribute_p ();
+
+#undef SDB_DEBUGGING_INFO
+#undef MIPS_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+
+#define DWARF2_DEBUGGING_INFO
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ fprintf (FILE, "\t.align %d\n", LOG);
+
+#define UNALIGNED_SHORT_ASM_OP ".word"
+#define UNALIGNED_INT_ASM_OP ".long"
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".quad"
+
+#define ASM_OUTPUT_SECTION(FILE,SECTION) \
+ (strcmp (SECTION, ".text") == 0) \
+ ? text_section () \
+ : named_section (NULL_TREE, SECTION, 0), \
+ ASM_OUTPUT_ALIGN (FILE, 0) \
+
+#define ASM_OUTPUT_SECTION_NAME(FILE,DECL,NAME,RELOC) \
+ do \
+ { \
+ char *flags; \
+ int ovr = 0; \
+ if (DECL && DECL_MACHINE_ATTRIBUTES (DECL) \
+ && lookup_attribute \
+ ("overlaid", DECL_MACHINE_ATTRIBUTES (DECL))) \
+ flags = ",OVR", ovr = 1; \
+ else if (strncmp (NAME,".debug", 6) == 0) \
+ flags = ",NOWRT"; \
+ else \
+ flags = ""; \
+ fputc ('\n', (FILE)); \
+ fprintf (FILE, ".section\t%s%s\n", NAME, flags); \
+ if (ovr) \
+ (NAME) = ""; \
+ } while (0)
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { literals_section(); \
+ fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef ASM_FORMAT_PRIVATE_NAME
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 12), \
+ sprintf ((OUTPUT), "%s___%d", (NAME), (LABELNO)))
+
+/* ??? VMS uses different linkage. */
+#undef ASM_OUTPUT_MI_THUNK
+
+#undef ASM_SPEC
+#undef ASM_FINAL_SPEC
+#undef LINK_SPEC
+#undef STARTFILE_SPEC
+#define ASM_SPEC "-nocpp %{pg}"
+#define LINK_SPEC "%{g3:-g3} %{g0:-g0} %{shared:-shared} %{v:-v}"
+
+/* Define the names of the division and modulus functions. */
+#define DIVSI3_LIBCALL "OTS$DIV_I"
+#define DIVDI3_LIBCALL "OTS$DIV_L"
+#define UDIVSI3_LIBCALL "OTS$DIV_UI"
+#define UDIVDI3_LIBCALL "OTS$DIV_UL"
+#define MODSI3_LIBCALL "OTS$REM_I"
+#define MODDI3_LIBCALL "OTS$REM_L"
+#define UMODSI3_LIBCALL "OTS$REM_UI"
+#define UMODDI3_LIBCALL "OTS$REM_UL"
+
+#define DIR_SEPARATOR ']'
+
+#define PREFIX "GNU_ROOT:"
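
[Editor's note: FUNCTION_ARG and FUNCTION_ARG_PARTIAL_NREGS above encode the VMS convention that only the first six 64-bit argument slots live in registers: integer slots map to hard registers 16-21 and, when FP registers are enabled, floating slots map to 48-53 (the same slot number plus 32). The toy model below ignores MUST_PASS_IN_STACK, complex modes and the argument-info register, and takes sizes in 64-bit words:]

#include <stdio.h>

/* Which hard register receives the argument occupying slot NUM_ARGS,
   or -1 if the slot is past the sixth and therefore on the stack.  */
static int
arg_register (int num_args, int is_float)
{
  if (num_args >= 6)
    return -1;
  return 16 + num_args + (is_float ? 32 : 0);
}

/* How many words of an argument of SIZE words, starting at slot
   NUM_ARGS, still fit in registers when the rest spills to the stack.  */
static int
partial_nregs (int num_args, int size)
{
  if (num_args < 6 && 6 < num_args + size)
    return 6 - num_args;
  return 0;
}

int
main (void)
{
  printf ("3rd integer arg -> hard reg %d\n", arg_register (2, 0));  /* 18 */
  printf ("3rd float arg   -> hard reg %d\n", arg_register (2, 1));  /* 50 */
  printf ("7th arg         -> %d (stack)\n", arg_register (6, 0));
  printf ("4-word arg at slot 4: %d word(s) in regs, rest on stack\n",
          partial_nregs (4, 4));                                     /* 2 */
  return 0;
}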
diff --git a/contrib/gcc/config/alpha/vxworks.h b/contrib/gcc/config/alpha/vxworks.h
new file mode 100644
index 0000000..6dee4b3
--- /dev/null
+++ b/contrib/gcc/config/alpha/vxworks.h
@@ -0,0 +1,51 @@
+/* Definitions of target machine for GNU compiler. Vxworks Alpha version.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file just exists to give specs for the Alpha running on VxWorks. */
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{mvxsim:-DCPU=SIMALPHADUNIX} \
+%{!mvxsim: %{!mcpu*|mcpu=21064:-DCPU=21064} %{mcpu=21164:-DCPU=21164}} \
+%{posix: -D_POSIX_SOURCE}"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "\
+-D__vxworks -D__alpha_vxworks -Asystem(vxworks) \
+-Asystem(embedded) -D_LONGLONG"
+
+/* VxWorks does all the library stuff itself. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* VxWorks uses object files, not loadable images. Make the linker just
+ combine objects. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-r"
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
diff --git a/contrib/gcc/config/alpha/x-alpha b/contrib/gcc/config/alpha/x-alpha
index 9919747..9686ab9 100644
--- a/contrib/gcc/config/alpha/x-alpha
+++ b/contrib/gcc/config/alpha/x-alpha
@@ -1 +1,2 @@
CLIB=-lmld
+EXTRA_HEADERS = $(srcdir)/config/alpha/va_list.h
diff --git a/contrib/gcc/config/alpha/xm-alpha.h b/contrib/gcc/config/alpha/xm-alpha.h
index 642e1cf..7665127 100644
--- a/contrib/gcc/config/alpha/xm-alpha.h
+++ b/contrib/gcc/config/alpha/xm-alpha.h
@@ -41,12 +41,18 @@ Boston, MA 02111-1307, USA. */
#define SUCCESS_EXIT_CODE 0
#define FATAL_EXIT_CODE 33
-/* If not compiled with GNU C, use the builtin alloca. */
-#if !defined(__GNUC__) && !defined(_WIN32)
+/* If compiled with GNU C, use the builtin alloca. */
+#ifndef alloca
+#if defined(__GNUC__) && !defined(USE_C_ALLOCA)
+#define alloca __builtin_alloca
+#else
+#if !defined(_WIN32) && !defined(USE_C_ALLOCA) && !defined(OPEN_VMS)
#include <alloca.h>
#else
extern void *alloca ();
#endif
+#endif
+#endif
/* The host compiler has problems with enum bitfields since it makes
them signed so we can't fit all our codes in. */
@@ -65,14 +71,6 @@ extern void *malloc (), *realloc (), *calloc ();
#include "string.h"
#endif
-/* OSF/1 has vprintf. */
-
-#define HAVE_VPRINTF
-
-/* OSF/1 has putenv. */
-
-#define HAVE_PUTENV
-
/* OSF/1 is POSIX.1 compliant. */
#define POSIX
diff --git a/contrib/gcc/config/alpha/xm-openbsd.h b/contrib/gcc/config/alpha/xm-openbsd.h
new file mode 100644
index 0000000..50f4366
--- /dev/null
+++ b/contrib/gcc/config/alpha/xm-openbsd.h
@@ -0,0 +1,23 @@
+/* Configuration file for a host running alpha OpenBSD.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-openbsd.h>
+#include <alpha/xm-alpha.h>
+
diff --git a/contrib/gcc/config/alpha/xm-vms.h b/contrib/gcc/config/alpha/xm-vms.h
new file mode 100644
index 0000000..472a225
--- /dev/null
+++ b/contrib/gcc/config/alpha/xm-vms.h
@@ -0,0 +1,93 @@
+/* Configuration for GNU C-compiler for openVMS/Alpha.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Klaus Kaempf (kkaempf@progis.de).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* If compiling with DECC, need to fix problem with <stdio.h>
+ which defines a macro called FILE_TYPE that breaks "tree.h".
+ Fortunately it uses #ifndef to suppress multiple inclusions.
+ Three possible cases:
+ 1) <stdio.h> has already been included -- ours will be no-op;
+ 2) <stdio.h> will be included after us -- "theirs" will be no-op;
+ 3) <stdio.h> isn't needed -- including it here shouldn't hurt.
+ In all three cases, the problem macro will be removed here. */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef __DECC
+#undef FILE_TYPE
+#endif
+
+#undef HOST_BITS_PER_LONG
+#define HOST_BITS_PER_LONG 32
+
+#define HOST_WIDE_INT long long
+#define HOST_BITS_PER_WIDE_INT 64
+
+#undef SUCCESS_EXIT_CODE
+#define SUCCESS_EXIT_CODE 1
+#undef FATAL_EXIT_CODE
+#define FATAL_EXIT_CODE (44 | 0x10000000) /* Abort, and no DCL message. */
+
+/* A couple of conditionals for the execution machine are controlled here. */
+#ifndef VMS
+#define VMS
+#endif
+
+#define GCC_INCLUDE_DIR ""
+/* Specify the list of include file directories. */
+#define INCLUDE_DEFAULTS \
+{ \
+ { "GNU_GXX_INCLUDE:", "G++", 1, 1 }, \
+ { "GNU_CC_INCLUDE:", "GCC", 0, 0 }, \
+ { ".", 0, 0, 1 }, \
+ { 0, 0, 0, 0 } \
+}
+
+/* Define a local equivalent (sort of) for unlink */
+#define unlink remove
+
+#define NEED_ATEXIT
+#define HAVE_VPRINTF
+#define HAVE_PUTENV
+#define HAVE_STRERROR
+#define HAVE_ATOLL
+
+#define NO_SYS_PARAMS_H /* Don't have <sys/params.h> */
+#define USE_C_ALLOCA /* Using alloca.c */
+
+#define HAVE_FCNTL_H 1
+#define HAVE_STDLIB_H 1
+#define HAVE_UNISTD_H 1
+#define HAVE_STRING_H 1
+#define HAVE_LIMITS_H 1
+#define HAVE_STDDEF_H 1
+#define HAVE_TIME_H 1
+#define STDC_HEADERS 1
+#define HAVE_CPP_STRINGIFY 1
+
+#if __STDC__
+extern void *alloca (size_t);
+#else
+extern char *alloca (unsigned int);
+#endif
+
+#define OBJECT_SUFFIX ".obj"
+#define EXECUTABLE_SUFFIX ".exe"
diff --git a/contrib/gcc/config/aoutos.h b/contrib/gcc/config/aoutos.h
index e9caa71..6f4e262 100644
--- a/contrib/gcc/config/aoutos.h
+++ b/contrib/gcc/config/aoutos.h
@@ -39,50 +39,3 @@ Boston, MA 02111-1307, USA. */
/* Define a symbol indicating that we are using aoutos.h. */
#define USING_AOUTOS_H
-
-/* A C statement (sans semicolon) to output an element in the table of
- global constructors.
- If using GNU LD, tell it that this is part of the static destructor set.
- This code works for any machine provided you use GNU as/ld.
- If not using GNU LD, rely on a "collect" program to look for names defined
- in the particular form we choose as global constructor function names. */
-
-#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
- do { \
- if (flag_gnu_linker) \
- { \
- /* Output an N_SETT (0x16, 22.) for the name. */ \
- fprintf (FILE, "%s \"___CTOR_LIST__\",22,0,0,", ASM_STABS_OP); \
- assemble_name (FILE, NAME); \
- fputc ('\n', FILE); \
- } \
- } while (0)
-
-
-/* A C statement (sans semicolon) to output an element in the table of
- global destructors. */
-
-#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
- do { \
- if (flag_gnu_linker) \
- { \
- /* Output an N_SETT (0x16, 22.) for the name. */ \
- fprintf (FILE, "%s \"___DTOR_LIST__\",22,0,0,", ASM_STABS_OP); \
- assemble_name (FILE, NAME); \
- fputc ('\n', FILE); \
- } \
- } while (0)
-
-/* Likewise for entries we want to record for garbage collection.
- Garbage collection is still under development. */
-
-#define ASM_OUTPUT_GC_ENTRY(FILE,NAME) \
- do { \
- if (flag_gnu_linker) \
- { \
- /* Output an N_SETT (0x16, 22.) for the name. */ \
- fprintf (FILE, "%s \"___PTR_LIST__\",22,0,0,", ASM_STABS_OP); \
- assemble_name (FILE, NAME); \
- fputc ('\n', FILE); \
- } \
- } while (0)
diff --git a/contrib/gcc/config/dbx.h b/contrib/gcc/config/dbx.h
new file mode 100644
index 0000000..c5cd3b5
--- /dev/null
+++ b/contrib/gcc/config/dbx.h
@@ -0,0 +1,30 @@
+/* Prefer DBX (stabs) debugging information.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file causes gcc to prefer using DBX (stabs) debugging
+ information. The configure script will add a #include of this file
+ to tm.h when --with-stabs is used for certain targets. */
+
+#ifndef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+#endif
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
diff --git a/contrib/gcc/config/dbxcoff.h b/contrib/gcc/config/dbxcoff.h
new file mode 100644
index 0000000..9497a70
--- /dev/null
+++ b/contrib/gcc/config/dbxcoff.h
@@ -0,0 +1,87 @@
+/* Definitions needed when using stabs embedded in COFF sections.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This file may be included by any COFF target which wishes to
+ support -gstabs generating stabs in sections, as produced by gas
+ and understood by gdb. */
+
+/* Output DBX (stabs) debugging information if doing -gstabs. */
+
+#undef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+/* Generate SDB debugging information by default. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE SDB_DEBUG
+#endif
+
+/* Be function-relative for block and source line stab directives. */
+
+#undef DBX_BLOCKS_FUNCTION_RELATIVE
+#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+
+/* but, to make this work, functions must appear prior to line info. */
+
+#undef DBX_FUNCTION_FIRST
+#define DBX_FUNCTION_FIRST
+
+/* Generate a blank trailing N_SO to mark the end of the .o file, since
+ we can't depend upon the linker to mark .o file boundaries with
+ embedded stabs. */
+
+#undef DBX_OUTPUT_MAIN_SOURCE_FILE_END
+#define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \
+ fprintf (FILE, \
+ "\t.text\n\t.stabs \"\",%d,0,0,Letext\nLetext:\n", N_SO)
+
+/* Like block addresses, stabs line numbers are relative to the
+ current function. */
+
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(FILE, LINE) \
+{ if (write_symbols == SDB_DEBUG) { \
+ fprintf ((FILE), "\t.ln\t%d\n", \
+ ((sdb_begin_function_line > -1) \
+ ? (LINE) - sdb_begin_function_line : 1)); \
+ } else if (write_symbols == DBX_DEBUG) { \
+ static int sym_lineno = 1; \
+ char buffer[256]; \
+ ASM_GENERATE_INTERNAL_LABEL (buffer, "LM", sym_lineno); \
+ fprintf (FILE, ".stabn 68,0,%d,", LINE); \
+ assemble_name (FILE, buffer); \
+ putc ('-', FILE); \
+ assemble_name (FILE, \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0)); \
+ putc ('\n', FILE); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "LM", sym_lineno); \
+ sym_lineno++; \
+ } }
+
+/* When generating stabs debugging, use N_BINCL entries. */
+
+#undef DBX_USE_BINCL
+#define DBX_USE_BINCL
+
+/* There is no limit to the length of stabs strings. */
+
+#ifndef DBX_CONTIN_LENGTH
+#define DBX_CONTIN_LENGTH 0
+#endif
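
[Editor's note: the DBX branch of ASM_OUTPUT_SOURCE_LINE above emits one N_SLINE stab (type 68) per source line, with its value written as the difference between a fresh local label and the function's entry symbol -- that difference is what makes the line numbers function-relative. A rough stand-alone version that prints the same shape of directive; the label and symbol names are made up, and GCC's ASM_GENERATE_INTERNAL_LABEL / assemble_name handling is simplified away:]

#include <stdio.h>

/* Emit a function-relative source-line stab for LINE in FUNCTION_ASM_NAME.  */
static void
output_source_line (FILE *file, const char *function_asm_name, int line)
{
  static int sym_lineno = 1;

  fprintf (file, ".stabn 68,0,%d,LM%d-%s\n",
           line, sym_lineno, function_asm_name);
  fprintf (file, "LM%d:\n", sym_lineno);
  sym_lineno++;
}

int
main (void)
{
  output_source_line (stdout, "_my_function", 10);
  output_source_line (stdout, "_my_function", 11);
  return 0;
}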
diff --git a/contrib/gcc/config/float-i128.h b/contrib/gcc/config/float-i128.h
new file mode 100644
index 0000000..6a9dd48
--- /dev/null
+++ b/contrib/gcc/config/float-i128.h
@@ -0,0 +1,96 @@
+/* float.h for target with IEEE 32, 64 and 128 bit floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 113
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 33
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 1.925929944387235853055977942584927319E-34L
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-16381)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 3.362103143112093506262677817321752603E-4932L
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-4931)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 16384
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.189731495357231765085759326628007016E+4932L
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 4932
+
+#endif /* _FLOAT_H_ */
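
[Editor's note: the constants in float-i128.h follow directly from the binary IEEE formats they describe: for FLT_RADIX 2, EPSILON is 2**(1-MANT_DIG) and the smallest normalised value is 2**(MIN_EXP-1). The check below verifies those identities against the host's own <float.h>, so the 113-bit LDBL_MANT_DIG above is only reproduced on a target whose long double really is IEEE quad; link with -lm:]

#include <float.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  printf ("FLT_MANT_DIG = %d, FLT_EPSILON = %.8e, 2^(1-%d) = %.8e\n",
          FLT_MANT_DIG, (double) FLT_EPSILON,
          FLT_MANT_DIG, ldexp (1.0, 1 - FLT_MANT_DIG));
  printf ("DBL_MANT_DIG = %d, DBL_EPSILON = %.17e, 2^(1-%d) = %.17e\n",
          DBL_MANT_DIG, DBL_EPSILON,
          DBL_MANT_DIG, ldexp (1.0, 1 - DBL_MANT_DIG));
  printf ("DBL_MIN      = %.17e, 2^(%d-1)  = %.17e\n",
          DBL_MIN, DBL_MIN_EXP, ldexp (1.0, DBL_MIN_EXP - 1));
  return 0;
}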
diff --git a/contrib/gcc/config/float-i32.h b/contrib/gcc/config/float-i32.h
new file mode 100644
index 0000000..c834926
--- /dev/null
+++ b/contrib/gcc/config/float-i32.h
@@ -0,0 +1,96 @@
+/* float.h for target with only IEEE 32 bit floating point format */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 24
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 6
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-125)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 128
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 24
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 6
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-125)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 128
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 38
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/float-i386.h b/contrib/gcc/config/float-i386.h
new file mode 100644
index 0000000..2d14f70
--- /dev/null
+++ b/contrib/gcc/config/float-i386.h
@@ -0,0 +1,104 @@
+/* float.h for target with IEEE 32/64 bit and Intel 386 style 80 bit
+ floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 64
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 18
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#ifndef __LDBL_UNION__
+#define __LDBL_UNION__
+union __convert_long_double {
+ unsigned __convert_long_double_i[4];
+ long double __convert_long_double_d;
+};
+#endif
+#define LDBL_EPSILON (__extension__ ((union __convert_long_double) {__convert_long_double_i: {0x0, 0x80000000, 0x3fc0, 0x0}}).__convert_long_double_d)
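+ /* An illustrative note, assuming the usual little-endian i386 layout of
+    the 80-bit extended type (this reading is not spelled out in the file
+    itself): the four words are {significand low, significand high with the
+    explicit integer bit, sign plus 15-bit biased exponent, padding}, so the
+    exponent 0x3fc0 = 16383 - 63 makes this constant 2**-63, matching
+    LDBL_MANT_DIG == 64.  LDBL_MIN and LDBL_MAX below use the same trick. */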
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-16381)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN (__extension__ ((union __convert_long_double) {__convert_long_double_i: {0x0, 0x80000000, 0x1, 0x0}}).__convert_long_double_d)
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-4931)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 16384
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX (__extension__ ((union __convert_long_double) {__convert_long_double_i: {0xffffffff, 0xffffffff, 0x107ffe, 0x0}}).__convert_long_double_d)
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 4932
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/float-i64.h b/contrib/gcc/config/float-i64.h
new file mode 100644
index 0000000..7dbe4e9
--- /dev/null
+++ b/contrib/gcc/config/float-i64.h
@@ -0,0 +1,96 @@
+/* float.h for target with IEEE 32 bit and 64 bit floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 15
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 2.2204460492503131e-16L
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-1021)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 2.2250738585072014e-308L
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 1024
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.7976931348623157e+308L
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 308
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/float-m68k.h b/contrib/gcc/config/float-m68k.h
new file mode 100644
index 0000000..b36d447
--- /dev/null
+++ b/contrib/gcc/config/float-m68k.h
@@ -0,0 +1,97 @@
+/* float.h for target with IEEE 32 bit and 64 bit and Motorola style 96 bit
+ floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 64
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 18
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 1.08420217248550443401e-19L
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-16382)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 1.68105157155604675313e-4932L
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-4931)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 16384
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.18973149535723176502e+4932L
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 4932
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/float-sh.h b/contrib/gcc/config/float-sh.h
new file mode 100644
index 0000000..9a94298
--- /dev/null
+++ b/contrib/gcc/config/float-sh.h
@@ -0,0 +1,130 @@
+/* float.h for target sh3e with optional IEEE 32 bit double format */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-125)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 128
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+#ifdef __SH3E__
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 24
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 6
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-125)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 1.17549435e-38F
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-37)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 128
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 3.40282347e+38F
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 38
+
+#else
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 15
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-1021)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 1024
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 308
+
+#endif
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 53
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 15
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 2.2204460492503131e-16
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-1021)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 2.2250738585072014e-308
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-307)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 1024
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.7976931348623157e+308
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 308
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/float-vax.h b/contrib/gcc/config/float-vax.h
new file mode 100644
index 0000000..3c87f79
--- /dev/null
+++ b/contrib/gcc/config/float-vax.h
@@ -0,0 +1,96 @@
+/* float.h for target with VAX floating point formats */
+#ifndef _FLOAT_H_
+#define _FLOAT_H_
+/* Produced by enquire version 4.3, CWI, Amsterdam */
+
+ /* Radix of exponent representation */
+#undef FLT_RADIX
+#define FLT_RADIX 2
+ /* Number of base-FLT_RADIX digits in the significand of a float */
+#undef FLT_MANT_DIG
+#define FLT_MANT_DIG 24
+ /* Number of decimal digits of precision in a float */
+#undef FLT_DIG
+#define FLT_DIG 6
+ /* Addition rounds to 0: zero, 1: nearest, 2: +inf, 3: -inf, -1: unknown */
+#undef FLT_ROUNDS
+#define FLT_ROUNDS 1
+ /* Difference between 1.0 and the minimum float greater than 1.0 */
+#undef FLT_EPSILON
+#define FLT_EPSILON 1.19209290e-07F
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised float */
+#undef FLT_MIN_EXP
+#define FLT_MIN_EXP (-127)
+ /* Minimum normalised float */
+#undef FLT_MIN
+#define FLT_MIN 2.93873588e-39F
+ /* Minimum int x such that 10**x is a normalised float */
+#undef FLT_MIN_10_EXP
+#define FLT_MIN_10_EXP (-38)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable float */
+#undef FLT_MAX_EXP
+#define FLT_MAX_EXP 127
+ /* Maximum float */
+#undef FLT_MAX
+#define FLT_MAX 1.70141173e+38F
+ /* Maximum int x such that 10**x is a representable float */
+#undef FLT_MAX_10_EXP
+#define FLT_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a double */
+#undef DBL_MANT_DIG
+#define DBL_MANT_DIG 56
+ /* Number of decimal digits of precision in a double */
+#undef DBL_DIG
+#define DBL_DIG 16
+ /* Difference between 1.0 and the minimum double greater than 1.0 */
+#undef DBL_EPSILON
+#define DBL_EPSILON 2.77555756156289135e-17
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised double */
+#undef DBL_MIN_EXP
+#define DBL_MIN_EXP (-127)
+ /* Minimum normalised double */
+#undef DBL_MIN
+#define DBL_MIN 2.93873587705571877e-39
+ /* Minimum int x such that 10**x is a normalised double */
+#undef DBL_MIN_10_EXP
+#define DBL_MIN_10_EXP (-38)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable double */
+#undef DBL_MAX_EXP
+#define DBL_MAX_EXP 127
+ /* Maximum double */
+#undef DBL_MAX
+#define DBL_MAX 1.70141183460469229e+38
+ /* Maximum int x such that 10**x is a representable double */
+#undef DBL_MAX_10_EXP
+#define DBL_MAX_10_EXP 38
+
+ /* Number of base-FLT_RADIX digits in the significand of a long double */
+#undef LDBL_MANT_DIG
+#define LDBL_MANT_DIG 56
+ /* Number of decimal digits of precision in a long double */
+#undef LDBL_DIG
+#define LDBL_DIG 16
+ /* Difference between 1.0 and the minimum long double greater than 1.0 */
+#undef LDBL_EPSILON
+#define LDBL_EPSILON 2.77555756156289135e-17
+ /* Minimum int x such that FLT_RADIX**(x-1) is a normalised long double */
+#undef LDBL_MIN_EXP
+#define LDBL_MIN_EXP (-127)
+ /* Minimum normalised long double */
+#undef LDBL_MIN
+#define LDBL_MIN 2.93873587705571877e-39
+ /* Minimum int x such that 10**x is a normalised long double */
+#undef LDBL_MIN_10_EXP
+#define LDBL_MIN_10_EXP (-38)
+ /* Maximum int x such that FLT_RADIX**(x-1) is a representable long double */
+#undef LDBL_MAX_EXP
+#define LDBL_MAX_EXP 127
+ /* Maximum long double */
+#undef LDBL_MAX
+#define LDBL_MAX 1.70141183460469229e+38
+ /* Maximum int x such that 10**x is a representable long double */
+#undef LDBL_MAX_10_EXP
+#define LDBL_MAX_10_EXP 38
+
+#endif /* _FLOAT_H_ */
diff --git a/contrib/gcc/config/fp-bit.c b/contrib/gcc/config/fp-bit.c
index 4ee08f1..f4a1e2a 100644
--- a/contrib/gcc/config/fp-bit.c
+++ b/contrib/gcc/config/fp-bit.c
@@ -1,8 +1,7 @@
/* This is a software floating point library which can be used instead of
the floating point routines in libgcc1.c for targets without hardware
- floating point. */
-
-/* Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+ floating point.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
@@ -44,6 +43,55 @@ Boston, MA 02111-1307, USA. */
/* The intended way to use this file is to make two copies, add `#define FLOAT'
to one copy, then compile both copies and add them to libgcc.a. */
+/* Defining FINE_GRAINED_LIBRARIES allows one to select which routines
+ from this file are compiled via additional -D options.
+
+ This avoids the need to pull in the entire fp emulation library
+ when only a small number of functions are needed.
+
+ If FINE_GRAINED_LIBRARIES is not defined, then compile every
+ suitable routine. */
+#ifndef FINE_GRAINED_LIBRARIES
+#define L_pack_df
+#define L_unpack_df
+#define L_pack_sf
+#define L_unpack_sf
+#define L_addsub_sf
+#define L_addsub_df
+#define L_mul_sf
+#define L_mul_df
+#define L_div_sf
+#define L_div_df
+#define L_fpcmp_parts_sf
+#define L_fpcmp_parts_df
+#define L_compare_sf
+#define L_compare_df
+#define L_eq_sf
+#define L_eq_df
+#define L_ne_sf
+#define L_ne_df
+#define L_gt_sf
+#define L_gt_df
+#define L_ge_sf
+#define L_ge_df
+#define L_lt_sf
+#define L_lt_df
+#define L_le_sf
+#define L_le_df
+#define L_si_to_sf
+#define L_si_to_df
+#define L_sf_to_si
+#define L_df_to_si
+#define L_f_to_usi
+#define L_df_to_usi
+#define L_negate_sf
+#define L_negate_df
+#define L_make_sf
+#define L_make_df
+#define L_sf_to_df
+#define L_df_to_sf
+#endif
+
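+/* For illustration only (the command lines and object names here are
+   hypothetical, not taken from any real makefile fragment): the two copies
+   mentioned above would be built roughly as
+	cc -c -DFLOAT fp-bit.c -o fp-bit-sf.o
+	cc -c fp-bit.c -o fp-bit-df.o
+   and, with FINE_GRAINED_LIBRARIES, each routine becomes its own object,
+   e.g. "cc -c -DFLOAT -DFINE_GRAINED_LIBRARIES -DL_addsub_sf fp-bit.c".  */
+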
/* The following macros can be defined to change the behaviour of this file:
FLOAT: Implement a `float', aka SFmode, fp library. If this is not
defined, then this file implements a `double', aka DFmode, fp library.
@@ -62,6 +110,52 @@ Boston, MA 02111-1307, USA. */
SMALL_MACHINE: Useful when operations on QIs and HIs are faster
than on an SI */
+/* We don't currently support extended floats (long doubles) on machines
+ without hardware to deal with them.
+
+ These stubs are just to keep the linker from complaining about unresolved
+ references which can be pulled in from libio & libstdc++, even if the
+ user isn't using long doubles. However, they may generate an unresolved
+ external to abort if abort is not used by the function, and the stubs
+ are referenced from within libc, since libgcc goes before and after the
+ system library. */
+
+#ifdef EXTENDED_FLOAT_STUBS
+__truncxfsf2 (){ abort(); }
+__extendsfxf2 (){ abort(); }
+__addxf3 (){ abort(); }
+__divxf3 (){ abort(); }
+__eqxf2 (){ abort(); }
+__extenddfxf2 (){ abort(); }
+__gtxf2 (){ abort(); }
+__lexf2 (){ abort(); }
+__ltxf2 (){ abort(); }
+__mulxf3 (){ abort(); }
+__negxf2 (){ abort(); }
+__nexf2 (){ abort(); }
+__subxf3 (){ abort(); }
+__truncxfdf2 (){ abort(); }
+
+__trunctfsf2 (){ abort(); }
+__extendsftf2 (){ abort(); }
+__addtf3 (){ abort(); }
+__divtf3 (){ abort(); }
+__eqtf2 (){ abort(); }
+__extenddftf2 (){ abort(); }
+__gttf2 (){ abort(); }
+__letf2 (){ abort(); }
+__lttf2 (){ abort(); }
+__multf3 (){ abort(); }
+__negtf2 (){ abort(); }
+__netf2 (){ abort(); }
+__subtf3 (){ abort(); }
+__trunctfdf2 (){ abort(); }
+__gexf2 (){ abort(); }
+__fixxfsi (){ abort(); }
+__floatsixf (){ abort(); }
+#else /* !EXTENDED_FLOAT_STUBS, rest of file */
+
+
typedef SFtype __attribute__ ((mode (SF)));
typedef DFtype __attribute__ ((mode (DF)));
@@ -99,8 +193,9 @@ typedef unsigned int UDItype __attribute__ ((mode (DI)));
# define FRAC_NBITS 32
# define FRACHIGH 0x80000000L
# define FRACHIGH2 0xc0000000L
-# define pack_d pack_f
-# define unpack_d unpack_f
+# define pack_d __pack_f
+# define unpack_d __unpack_f
+# define __fpcmp_parts __fpcmp_parts_f
typedef USItype fractype;
typedef UHItype halffractype;
typedef SFtype FLO_type;
@@ -121,6 +216,9 @@ typedef unsigned int UDItype __attribute__ ((mode (DI)));
# define FRAC_NBITS 64
# define FRACHIGH 0x8000000000000000LL
# define FRACHIGH2 0xc000000000000000LL
+# define pack_d __pack_d
+# define unpack_d __unpack_d
+# define __fpcmp_parts __fpcmp_parts_d
typedef UDItype fractype;
typedef USItype halffractype;
typedef DFtype FLO_type;
@@ -191,7 +289,9 @@ typedef unsigned int UDItype __attribute__ ((mode (DI)));
#endif
+#ifndef INLINE
#define INLINE __inline__
+#endif
/* Preserve the sticky-bit when shifting fractions to the right. */
#define LSHIFT(a) { a = (a & 1) | (a >> 1); }
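/* That is, any 1 bit already in the low (sticky) position is ORed back in
   after the shift, so information about discarded low-order bits survives
   for the final rounding step.  */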
@@ -199,7 +299,7 @@ typedef unsigned int UDItype __attribute__ ((mode (DI)));
/* numeric parameters */
/* F_D_BITOFF is the number of bits offset between the MSB of the mantissa
of a float and of a double. Assumes there are only two float types.
- (double::FRAC_BITS+double::NGARGS-(float::FRAC_BITS-float::NGARDS))
+ (double::FRAC_BITS+double::NGARDS-(float::FRAC_BITS-float::NGARDS))
*/
#define F_D_BITOFF (52+8-(23+7))
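/* With the parameters used here that is (52+8)-(23+7) = 30 bits.  */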
@@ -328,7 +428,10 @@ flip_sign ( fp_number_type * x)
x->sign = !x->sign;
}
-static FLO_type
+extern FLO_type pack_d ( fp_number_type * );
+
+#if defined(L_pack_df) || defined(L_pack_sf)
+FLO_type
pack_d ( fp_number_type * src)
{
FLO_union_type dst;
@@ -414,7 +517,7 @@ pack_d ( fp_number_type * src)
}
/* We previously used bitfields to store the number, but this doesn't
- handle little/big endian systems conviently, so use shifts and
+ handle little/big endian systems conveniently, so use shifts and
masks */
#ifdef FLOAT_BIT_ORDER_MISMATCH
dst.bits.fraction = fraction;
@@ -436,12 +539,16 @@ pack_d ( fp_number_type * src)
return dst.value;
}
+#endif
-static void
+extern void unpack_d (FLO_union_type *, fp_number_type *);
+
+#if defined(L_unpack_df) || defined(L_unpack_sf)
+void
unpack_d (FLO_union_type * src, fp_number_type * dst)
{
/* We previously used bitfields to store the number, but this doesn't
- handle little/big endian systems conviently, so use shifts and
+ handle little/big endian systems conveniently, so use shifts and
masks */
fractype fraction;
int exp;
@@ -504,13 +611,13 @@ unpack_d (FLO_union_type * src, fp_number_type * dst)
else
{
/* Non zero fraction, means nan */
- if (sign)
+ if (fraction & QUIET_NAN)
{
- dst->class = CLASS_SNAN;
+ dst->class = CLASS_QNAN;
}
else
{
- dst->class = CLASS_QNAN;
+ dst->class = CLASS_SNAN;
}
/* Keep the fraction part as the nan number */
dst->fraction.ll = fraction;
@@ -524,7 +631,9 @@ unpack_d (FLO_union_type * src, fp_number_type * dst)
dst->fraction.ll = (fraction << NGARDS) | IMPLICIT_1;
}
}
+#endif
+#if defined(L_addsub_sf) || defined(L_addsub_df)
static fp_number_type *
_fpadd_parts (fp_number_type * a,
fp_number_type * b,
@@ -559,6 +668,12 @@ _fpadd_parts (fp_number_type * a,
}
if (iszero (b))
{
+ if (iszero (a))
+ {
+ *tmp = *a;
+ tmp->sign = a->sign & b->sign;
+ return tmp;
+ }
return a;
}
if (iszero (a))
@@ -692,8 +807,10 @@ sub (FLO_type arg_a, FLO_type arg_b)
return pack_d (res);
}
+#endif
-static fp_number_type *
+#if defined(L_mul_sf) || defined(L_mul_df)
+static INLINE fp_number_type *
_fpmul_parts ( fp_number_type * a,
fp_number_type * b,
fp_number_type * tmp)
@@ -741,13 +858,13 @@ _fpmul_parts ( fp_number_type * a,
/* Calculate the mantissa by multiplying both 64bit numbers to get a
128 bit number */
{
- fractype x = a->fraction.ll;
- fractype ylow = b->fraction.ll;
- fractype yhigh = 0;
- int bit;
-
#if defined(NO_DI_MODE)
{
+ fractype x = a->fraction.ll;
+ fractype ylow = b->fraction.ll;
+ fractype yhigh = 0;
+ int bit;
+
/* ??? This does the multiply one bit at a time.  Optimize.  */
for (bit = 0; bit < FRAC_NBITS; bit++)
{
@@ -878,20 +995,18 @@ multiply (FLO_type arg_a, FLO_type arg_b)
return pack_d (res);
}
+#endif
-static fp_number_type *
+#if defined(L_div_sf) || defined(L_div_df)
+static INLINE fp_number_type *
_fpdiv_parts (fp_number_type * a,
fp_number_type * b,
fp_number_type * tmp)
{
- fractype low = 0;
- fractype high = 0;
- fractype r0, r1, y0, y1, bit;
- fractype q;
+ fractype bit;
fractype numerator;
fractype denominator;
fractype quotient;
- fractype remainder;
if (isnan (a))
{
@@ -901,13 +1016,15 @@ _fpdiv_parts (fp_number_type * a,
{
return b;
}
+
+ a->sign = a->sign ^ b->sign;
+
if (isinf (a) || iszero (a))
{
if (a->class == b->class)
return nan ();
return a;
}
- a->sign = a->sign ^ b->sign;
if (isinf (b))
{
@@ -918,15 +1035,12 @@ _fpdiv_parts (fp_number_type * a,
if (iszero (b))
{
a->class = CLASS_INFINITY;
- return b;
+ return a;
}
  /* Calculate the mantissa by dividing the two 64bit fractions, producing
     the quotient one bit at a time */
{
- int carry;
- intfrac d0, d1; /* weren't unsigned before ??? */
-
/* quotient =
( numerator / denominator) * 2^(numerator exponent - denominator exponent)
*/
@@ -989,15 +1103,19 @@ divide (FLO_type arg_a, FLO_type arg_b)
return pack_d (res);
}
+#endif
+int __fpcmp_parts (fp_number_type * a, fp_number_type *b);
+
+#if defined(L_fpcmp_parts_sf) || defined(L_fpcmp_parts_df)
/* according to the demo, fpcmp returns a comparison with 0... thus
a<b -> -1
a==b -> 0
a>b -> +1
*/
-static int
-_fpcmp_parts (fp_number_type * a, fp_number_type * b)
+int
+__fpcmp_parts (fp_number_type * a, fp_number_type * b)
{
#if 0
/* either nan -> unordered. Must be checked outside of this routine. */
@@ -1072,7 +1190,9 @@ _fpcmp_parts (fp_number_type * a, fp_number_type * b)
/* after all that, they're equal. */
return 0;
}
+#endif
+#if defined(L_compare_sf) || defined(L_compare_df)
CMPtype
compare (FLO_type arg_a, FLO_type arg_b)
{
@@ -1082,13 +1202,15 @@ compare (FLO_type arg_a, FLO_type arg_b)
unpack_d ((FLO_union_type *) & arg_a, &a);
unpack_d ((FLO_union_type *) & arg_b, &b);
- return _fpcmp_parts (&a, &b);
+ return __fpcmp_parts (&a, &b);
}
+#endif
#ifndef US_SOFTWARE_GOFAST
/* These should be optimized for their specific tasks someday. */
+#if defined(L_eq_sf) || defined(L_eq_df)
CMPtype
_eq_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1101,9 +1223,11 @@ _eq_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return 1; /* false, truth == 0 */
- return _fpcmp_parts (&a, &b) ;
+ return __fpcmp_parts (&a, &b) ;
}
+#endif
+#if defined(L_ne_sf) || defined(L_ne_df)
CMPtype
_ne_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1116,9 +1240,11 @@ _ne_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return 1; /* true, truth != 0 */
- return _fpcmp_parts (&a, &b) ;
+ return __fpcmp_parts (&a, &b) ;
}
+#endif
+#if defined(L_gt_sf) || defined(L_gt_df)
CMPtype
_gt_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1131,9 +1257,11 @@ _gt_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return -1; /* false, truth > 0 */
- return _fpcmp_parts (&a, &b);
+ return __fpcmp_parts (&a, &b);
}
+#endif
+#if defined(L_ge_sf) || defined(L_ge_df)
CMPtype
_ge_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1145,9 +1273,11 @@ _ge_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return -1; /* false, truth >= 0 */
- return _fpcmp_parts (&a, &b) ;
+ return __fpcmp_parts (&a, &b) ;
}
+#endif
+#if defined(L_lt_sf) || defined(L_lt_df)
CMPtype
_lt_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1160,9 +1290,11 @@ _lt_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return 1; /* false, truth < 0 */
- return _fpcmp_parts (&a, &b);
+ return __fpcmp_parts (&a, &b);
}
+#endif
+#if defined(L_le_sf) || defined(L_le_df)
CMPtype
_le_f2 (FLO_type arg_a, FLO_type arg_b)
{
@@ -1175,11 +1307,13 @@ _le_f2 (FLO_type arg_a, FLO_type arg_b)
if (isnan (&a) || isnan (&b))
return 1; /* false, truth <= 0 */
- return _fpcmp_parts (&a, &b) ;
+ return __fpcmp_parts (&a, &b) ;
}
+#endif
#endif /* ! US_SOFTWARE_GOFAST */
+#if defined(L_si_to_sf) || defined(L_si_to_df)
FLO_type
si_to_float (SItype arg_a)
{
@@ -1215,7 +1349,9 @@ si_to_float (SItype arg_a)
}
return pack_d (&in);
}
+#endif
+#if defined(L_sf_to_si) || defined(L_df_to_si)
SItype
float_to_si (FLO_type arg_a)
{
@@ -1229,7 +1365,7 @@ float_to_si (FLO_type arg_a)
return 0;
/* get reasonable MAX_SI_INT... */
if (isinf (&a))
- return a.sign ? MAX_SI_INT : (-MAX_SI_INT)-1;
+ return a.sign ? (-MAX_SI_INT)-1 : MAX_SI_INT;
/* it is a number, but a small one */
if (a.normal_exp < 0)
return 0;
@@ -1238,7 +1374,9 @@ float_to_si (FLO_type arg_a)
tmp = a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
return a.sign ? (-tmp) : (tmp);
}
+#endif
+#if defined(L_sf_to_usi) || defined(L_df_to_usi)
#ifdef US_SOFTWARE_GOFAST
/* While libgcc2.c defines its own __fixunssfsi and __fixunsdfsi routines,
we also define them for GOFAST because the ones in libgcc2.c have the
@@ -1256,24 +1394,26 @@ float_to_usi (FLO_type arg_a)
return 0;
if (isnan (&a))
return 0;
- /* get reasonable MAX_USI_INT... */
- if (isinf (&a))
- return a.sign ? MAX_USI_INT : 0;
/* it is a negative number */
if (a.sign)
return 0;
+ /* get reasonable MAX_USI_INT... */
+ if (isinf (&a))
+ return MAX_USI_INT;
/* it is a number, but a small one */
if (a.normal_exp < 0)
return 0;
if (a.normal_exp > 31)
return MAX_USI_INT;
else if (a.normal_exp > (FRACBITS + NGARDS))
- return a.fraction.ll << ((FRACBITS + NGARDS) - a.normal_exp);
+ return a.fraction.ll << (a.normal_exp - (FRACBITS + NGARDS));
else
return a.fraction.ll >> ((FRACBITS + NGARDS) - a.normal_exp);
}
#endif
+#endif
+#if defined(L_negate_sf) || defined(L_negate_df)
FLO_type
negate (FLO_type arg_a)
{
@@ -1283,9 +1423,11 @@ negate (FLO_type arg_a)
flip_sign (&a);
return pack_d (&a);
}
+#endif
#ifdef FLOAT
+#if defined(L_make_sf)
SFtype
__make_fp(fp_class_type class,
unsigned int sign,
@@ -1300,6 +1442,7 @@ __make_fp(fp_class_type class,
in.fraction.ll = frac;
return pack_d (&in);
}
+#endif
#ifndef FLOAT_ONLY
@@ -1310,6 +1453,7 @@ __make_fp(fp_class_type class,
extern DFtype __make_dp (fp_class_type, unsigned int, int, UDItype frac);
+#if defined(L_sf_to_df)
DFtype
sf_to_df (SFtype arg_a)
{
@@ -1319,6 +1463,7 @@ sf_to_df (SFtype arg_a)
return __make_dp (in.class, in.sign, in.normal_exp,
((UDItype) in.fraction.ll) << F_D_BITOFF);
}
+#endif
#endif
#endif
@@ -1327,6 +1472,7 @@ sf_to_df (SFtype arg_a)
extern SFtype __make_fp (fp_class_type, unsigned int, int, USItype);
+#if defined(L_make_df)
DFtype
__make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
{
@@ -1338,15 +1484,27 @@ __make_dp (fp_class_type class, unsigned int sign, int exp, UDItype frac)
in.fraction.ll = frac;
return pack_d (&in);
}
+#endif
+#if defined(L_df_to_sf)
SFtype
df_to_sf (DFtype arg_a)
{
fp_number_type in;
+ USItype sffrac;
unpack_d ((FLO_union_type *) & arg_a, &in);
- return __make_fp (in.class, in.sign, in.normal_exp,
- in.fraction.ll >> F_D_BITOFF);
+
+ sffrac = in.fraction.ll >> F_D_BITOFF;
+
+ /* We set the lowest guard bit in SFFRAC if we discarded any non
+ zero bits. */
+ if ((in.fraction.ll & (((USItype) 1 << F_D_BITOFF) - 1)) != 0)
+ sffrac |= 1;
+
+ return __make_fp (in.class, in.sign, in.normal_exp, sffrac);
}
+#endif
#endif
+#endif /* !EXTENDED_FLOAT_STUBS */
diff --git a/contrib/gcc/config/gnu.h b/contrib/gcc/config/gnu.h
index 8ea3ead..d169164 100644
--- a/contrib/gcc/config/gnu.h
+++ b/contrib/gcc/config/gnu.h
@@ -1,12 +1,5 @@
/* Configuration common to all targets running the GNU system. */
-/* Macro to produce CPP_PREDEFINES for GNU on a given machine. */
-#define GNU_CPP_PREDEFINES(machine) \
-"-D" machine " -Acpu(" machine ") -Amachine(" machine ")" \
-"-Dunix -Asystem(unix) \
--DMACH -Asystem(mach) \
--D__GNU__ -Asystem(gnu)"
-
/* Provide GCC options for standard feature-test macros. */
#undef CPP_SPEC
#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{bsd:-D_BSD_SOURCE}"
diff --git a/contrib/gcc/config/i386/386bsd.h b/contrib/gcc/config/i386/386bsd.h
index cdab5f5..7962321 100644
--- a/contrib/gcc/config/i386/386bsd.h
+++ b/contrib/gcc/config/i386/386bsd.h
@@ -49,11 +49,6 @@
} \
}
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
@@ -68,13 +63,13 @@
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
-#define JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
/* Don't default to pcc-struct-return, because gcc is the only compiler, and
we want to retain compatibility with older gcc versions. */
diff --git a/contrib/gcc/config/i386/aix386ng.h b/contrib/gcc/config/i386/aix386ng.h
index 5d09fc3..a177b69 100644
--- a/contrib/gcc/config/i386/aix386ng.h
+++ b/contrib/gcc/config/i386/aix386ng.h
@@ -1,6 +1,6 @@
/* Definitions for IBM PS2 running AIX/386.
- From: Minh Tran-Le <TRANLE@intellicorp.com>
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1996 Free Software Foundation, Inc.
+ Contributed by Minh Tran-Le <TRANLE@intellicorp.com>.
This file is part of GNU CC.
@@ -27,7 +27,7 @@ Boston, MA 02111-1307, USA. */
#include "svr3.h"
/* Use the ATT assembler syntax.
- This overrides at least one macro (ASM_OUTPUT_LABELREF) from svr3.h. */
+ This overrides at least one macro (USER_LABEL_PREFIX) from svr3.h. */
#include "i386/att.h"
@@ -44,10 +44,10 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
-#define CPP_PREDEFINES "-Dps2 -Dunix -Di386 -Asystem(unix) -Asystem(aix) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dps2 -Dunix -Asystem(aix)"
-#define CPP_SPEC \
- "%{posix:-D_POSIX_SOURCE}%{!posix:-DAIX} -D_I386 -D_AIX -D_MBCS"
+#define CPP_SPEC "%(cpp_cpu) \
+ %{posix:-D_POSIX_SOURCE}%{!posix:-DAIX} -D_I386 -D_AIX -D_MBCS"
/* Special flags for the AIX assembler to generate the short form for all
   qualifying forward references.  */
@@ -132,8 +132,7 @@ const_section () \
# undef EXTRA_SECTION_FUNCTIONS
# define EXTRA_SECTION_FUNCTIONS \
- CONST_SECTION_FUNCTION \
- BSS_SECTION_FUNCTION
+ CONST_SECTION_FUNCTION
/* for collect2 */
# define OBJECT_FORMAT_COFF
diff --git a/contrib/gcc/config/i386/att.h b/contrib/gcc/config/i386/att.h
index f8bbb76..e5c2d9c 100644
--- a/contrib/gcc/config/i386/att.h
+++ b/contrib/gcc/config/i386/att.h
@@ -1,5 +1,5 @@
/* Definitions for AT&T assembler syntax for the Intel 80386.
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -68,18 +68,6 @@ do \
/* Can't use ASM_OUTPUT_SKIP in text section; it doesn't leave 0s. */
#define ASM_NO_SKIP_IN_TEXT 1
-
-#undef BSS_SECTION_FUNCTION /* Override the definition from svr3.h. */
-#define BSS_SECTION_FUNCTION \
-void \
-bss_section () \
-{ \
- if (in_section != in_bss) \
- { \
- fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); \
- in_section = in_bss; \
- } \
-}
/* Define the syntax of labels and symbol definitions/declarations. */
@@ -99,8 +87,7 @@ bss_section () \
#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
-/* This is how to output a reference to a user-level label named NAME. */
+/* The prefix to add to user-visible assembler symbols. */
-#undef ASM_OUTPUT_LABELREF
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- fprintf (FILE, "%s", NAME)
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
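+
+/* With an empty prefix a C-level symbol such as foo is emitted simply as
+   "foo"; configurations that keep the traditional a.out convention (for
+   instance the i386 bsd.h later in this patch, unless NO_UNDERSCORES is
+   defined) set USER_LABEL_PREFIX to "_" and emit "_foo" instead.  */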
diff --git a/contrib/gcc/config/i386/bsd.h b/contrib/gcc/config/i386/bsd.h
index 6bf7399..d50be36 100644
--- a/contrib/gcc/config/i386/bsd.h
+++ b/contrib/gcc/config/i386/bsd.h
@@ -1,7 +1,7 @@
/* Definitions for BSD assembler syntax for Intel 386
(actually AT&T syntax for insns and operands,
adapted to BSD conventions for symbol names and debugging.)
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -115,12 +115,12 @@ Boston, MA 02111-1307, USA. */
fprintf (FILE, "%s%d:\n", PREFIX, NUM)
#endif
-/* This is how to output a reference to a user-level label named NAME. */
+/* The prefix to add to user-visible assembler symbols. */
#ifdef NO_UNDERSCORES
-#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "%s", NAME)
+#define USER_LABEL_PREFIX ""
#else
-#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "_%s", NAME)
+#define USER_LABEL_PREFIX "_"
#endif /* not NO_UNDERSCORES */
/* Sequent has some changes in the format of DBX symbols. */
diff --git a/contrib/gcc/config/i386/bsd386.h b/contrib/gcc/config/i386/bsd386.h
index 935a2e0..c0dcf87 100644
--- a/contrib/gcc/config/i386/bsd386.h
+++ b/contrib/gcc/config/i386/bsd386.h
@@ -1,5 +1,5 @@
-/* Configuration for an i386 running BSDI's BSD/386 1.1 as the target
- machine. */
+/* Configuration for an i386 running BSDI's BSD/OS (formerly known as BSD/386)
+ as the target machine. */
#include "i386/386bsd.h"
@@ -16,3 +16,18 @@
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32
+
+/* This is suitable for BSD/OS 3.0; we don't know about earlier releases. */
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START " #"
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#define DWARF2_UNWIND_INFO 0
+
+/* BSD/OS still uses old binutils that don't insert nops by default
+ when the .align directive demands to insert extra space in the text
+ segment. */
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d,0x90\n", (LOG))
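+
+/* For example, a request for a 2**4 = 16 byte boundary (LOG == 4) is
+   emitted as ".align 4,0x90", padding the text section with 0x90 (the
+   i386 nop opcode) instead of zero bytes.  */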
diff --git a/contrib/gcc/config/i386/crtdll.h b/contrib/gcc/config/i386/crtdll.h
new file mode 100644
index 0000000..f7eaf2b
--- /dev/null
+++ b/contrib/gcc/config/i386/crtdll.h
@@ -0,0 +1,42 @@
+/* Operating system specific defines to be used when targeting GCC for
+ hosting on Windows32, using GNU tools and the Windows32 API Library,
+ as distinct from winnt.h, which is used to build GCC for use with a
+ windows style library and tool set and uses the Microsoft tools.
+   This variant uses CRTDLL.DLL instead of MSVCRTDLL.DLL.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -D_WIN32 -DWIN32 -D__WIN32__ \
+ -D__MINGW32__ -DWINNT -D_X86_=1 -D__STDC__=1\
+ -D__stdcall=__attribute__((__stdcall__)) \
+ -D_stdcall=__attribute__((__stdcall__)) \
+ -D__cdecl=__attribute__((__cdecl__)) \
+ -D__declspec(x)=__attribute__((x)) \
+ -Asystem(winnt) -Acpu(i386) -Amachine(i386)"
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC "-lmingw32 -lgcc -lmoldname -lcrtdll"
+
+/* Specify a different entry point when linking a DLL */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{mdll:dllcrt1%O%s} %{!mdll:crt1%O%s}"
+
+#undef MATH_LIBRARY
+#define MATH_LIBRARY "-lcrtdll"
diff --git a/contrib/gcc/config/i386/dgux.c b/contrib/gcc/config/i386/dgux.c
new file mode 100644
index 0000000..ff36135
--- /dev/null
+++ b/contrib/gcc/config/i386/dgux.c
@@ -0,0 +1,190 @@
+/* Subroutines for GNU compiler for Intel 80x86 running DG/ux
+ Copyright (C) 1993, 1995, 1997 Free Software Foundation, Inc.
+   Currently maintained by gcc@dg-rtp.dg.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+#include <time.h>
+#include "i386/i386.c"
+
+
+extern char *version_string;
+
+struct option
+{
+ char *string;
+ int *variable;
+ int on_value;
+};
+
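+/* Print one option (TYPE followed by NAME, preceded by SEP) to FILE at
+   column POS; if it would run past column MAX, start a new line with
+   INDENT first.  Returns the updated column position.  */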
+static int
+output_option (file, sep, type, name, indent, pos, max)
+ FILE *file;
+ char *sep;
+ char *type;
+ char *name;
+ char *indent;
+ int pos;
+ int max;
+{
+ if (strlen (sep) + strlen (type) + strlen (name) + pos > max)
+ {
+ fprintf (file, indent);
+ return fprintf (file, "%s%s", type, name);
+ }
+ return pos + fprintf (file, "%s%s%s", sep, type, name);
+}
+
+static struct { char *name; int value; } m_options[] = TARGET_SWITCHES;
+
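+/* Write to FILE the optimization, debugging, profiling, -f, -W and -m
+   options currently in effect, wrapping lines at column MAX via
+   output_option and terminating the list with TERM.  */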
+static void
+output_options (file, f_options, f_len, W_options, W_len,
+ pos, max, sep, indent, term)
+ FILE *file;
+ struct option *f_options;
+ struct option *W_options;
+ int f_len, W_len;
+ int pos;
+ int max;
+ char *indent;
+ char *term;
+{
+ register int j;
+
+ if (optimize)
+ pos = output_option (file, sep, "-O", "", indent, pos, max);
+ if (write_symbols != NO_DEBUG)
+ pos = output_option (file, sep, "-g", "", indent, pos, max);
+/* if (flag_traditional)
+ pos = output_option (file, sep, "-traditional", "", indent, pos, max);*/
+ if (profile_flag)
+ pos = output_option (file, sep, "-p", "", indent, pos, max);
+ if (profile_block_flag)
+ pos = output_option (file, sep, "-a", "", indent, pos, max);
+
+ for (j = 0; j < f_len; j++)
+ if (*f_options[j].variable == f_options[j].on_value)
+ pos = output_option (file, sep, "-f", f_options[j].string,
+ indent, pos, max);
+
+ for (j = 0; j < W_len; j++)
+ if (*W_options[j].variable == W_options[j].on_value)
+ pos = output_option (file, sep, "-W", W_options[j].string,
+ indent, pos, max);
+
+ for (j = 0; j < sizeof m_options / sizeof m_options[0]; j++)
+ if (m_options[j].name[0] != '\0'
+ && m_options[j].value > 0
+ && ((m_options[j].value & target_flags)
+ == m_options[j].value))
+ pos = output_option (file, sep, "-m", m_options[j].name,
+ indent, pos, max);
+
+ pos = output_option (file, sep, "-mcpu=", ix86_cpu_string, indent, pos, max);
+ pos = output_option (file, sep, "-march=", ix86_arch_string, indent, pos, max);
+ fprintf (file, term);
+}
+
+/* Output to FILE the start of the assembler file. */
+
+void
+output_file_start (file, f_options, f_len, W_options, W_len)
+ FILE *file;
+ struct option *f_options;
+ struct option *W_options;
+ int f_len, W_len;
+{
+ register int pos;
+
+ output_file_directive (file, main_input_filename);
+  fprintf (file, "\t.version\t\"01.01\"\n");
+ /* Switch to the data section so that the coffsem symbol and the
+ gcc2_compiled. symbol aren't in the text section. */
+ data_section ();
+
+ pos = fprintf (file, "\n// cc1 (%s) arguments:", VERSION_STRING);
+ output_options (file, f_options, f_len, W_options, W_len,
+ pos, 75, " ", "\n// ", "\n\n");
+
+#ifdef TARGET_IDENTIFY_REVISION
+ if (TARGET_IDENTIFY_REVISION)
+ {
+ char indent[256];
+
+ time_t now = time ((time_t *)0);
+ sprintf (indent, "]\"\n\t%s\t \"@(#)%s [", IDENT_ASM_OP, main_input_filename);
+ fprintf (file, indent+3);
+ pos = fprintf (file, "gcc %s, %.24s,", VERSION_STRING, ctime (&now));
+ output_options (file, f_options, f_len, W_options, W_len,
+ pos, 150 - strlen (indent), " ", indent, "]\"\n\n");
+ }
+#endif /* TARGET_IDENTIFY_REVISION */
+}
+
+#ifndef CROSS_COMPILE
+#if defined (_abort_aux)
+/* Debugging aid to be registered via `atexit'. See the definition
+ of abort in dgux.h. */
+void
+abort_aux ()
+{
+ extern int insn_;
+ extern char * file_;
+ extern int line_;
+ static int done;
+ rtx line_note;
+
+ if (done++)
+ return;
+ if (file_ || line_)
+ {
+ if (write_symbols != NO_DEBUG)
+ {
+ for (line_note = (rtx) insn_ ; line_note != 0 ; line_note = PREV_INSN (line_note))
+ if (GET_CODE (line_note) == NOTE && NOTE_LINE_NUMBER (line_note) > 0)
+ break;
+ if (line_note != 0)
+ {
+ error_with_file_and_line (NOTE_SOURCE_FILE (line_note),
+ NOTE_LINE_NUMBER (line_note),
+ "Internal gcc abort from %s:%d",
+ file_ ? file_ : "<nofile>", line_);
+ if (insn_ && file_ && strcmp (file_, "toplev.c"))
+ {
+ error_with_file_and_line (NOTE_SOURCE_FILE (line_note),
+ NOTE_LINE_NUMBER (line_note),
+ "The local variable `insn' has the value:", 0);
+ debug_rtx ((rtx) insn_);
+ }
+ }
+ }
+ if (write_symbols == NO_DEBUG || line_note == 0)
+ {
+ error ("Internal gcc abort from %s:%d",
+ file_ ? file_ : "<nofile>", line_);
+ if (insn_ && file_ && strcmp (file_, "toplev.c"))
+ {
+ error ("The local variable `insn' has the value:", 0);
+ debug_rtx ((rtx) insn_);
+ }
+ }
+ }
+}
+#endif
+#endif
+
+
diff --git a/contrib/gcc/config/i386/dgux.h b/contrib/gcc/config/i386/dgux.h
new file mode 100644
index 0000000..dae050f
--- /dev/null
+++ b/contrib/gcc/config/i386/dgux.h
@@ -0,0 +1,265 @@
+/* Target definitions for GNU compiler for Intel 80x86 running DG/ux
+ Copyright (C) 1993, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Currently maintained by gcc@dg-rtp.dg.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* for now, we are just like the sysv4 version with a
+ few hacks
+*/
+
+#include "i386/sysv4.h"
+
+#ifndef VERSION_INFO2
+#define VERSION_INFO2 "$Revision: 1.3 $"
+#endif
+
+#ifndef VERSION_STRING
+#define VERSION_STRING version_string
+#endif
+
+/* Identify the compiler. */
+/* TARGET_VERSION is used by toplev.c; VERSION_STRING is used by -midentify-revision.  */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (%s%s, %s)", \
+ VERSION_INFO1, VERSION_INFO2, __DATE__)
+#undef VERSION_INFO1
+#define VERSION_INFO1 "ix86 DG/ux, "
+
+/* Augment TARGET_SWITCHES with the MXDB options. */
+#define MASK_STANDARD 0x40000000 /* Retain standard information */
+#define MASK_NOLEGEND 0x20000000 /* Discard legend information */
+#define MASK_EXTERNAL_LEGEND 0x10000000 /* Make external legends */
+#define MASK_IDENTIFY_REVISION 0x08000000 /* Emit 'ident' to .s */
+#define MASK_WARN_PASS_STRUCT 0x04000000 /* Warn when a struct argument is passed by value */
+
+#define TARGET_STANDARD (target_flags & MASK_STANDARD)
+#define TARGET_NOLEGEND (target_flags & MASK_NOLEGEND)
+#define TARGET_EXTERNAL_LEGEND (target_flags & MASK_EXTERNAL_LEGEND)
+#define TARGET_IDENTIFY_REVISION (target_flags & MASK_IDENTIFY_REVISION)
+#define TARGET_WARN_PASS_STRUCT (target_flags & MASK_WARN_PASS_STRUCT)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ { "standard", MASK_STANDARD }, \
+ { "legend", -MASK_NOLEGEND }, \
+ { "no-legend", MASK_NOLEGEND }, \
+ { "external-legend", MASK_EXTERNAL_LEGEND }, \
+ { "identify-revision", MASK_IDENTIFY_REVISION }, \
+ { "warn-passed-structs", MASK_WARN_PASS_STRUCT },
+
+#undef DWARF_DEBUGGING_INFO
+#define DWARF_DEBUGGING_INFO
+
+/* Allow -gstabs so that those who have gnu-as installed can debug
+   C++ programs.  */
+#undef DBX_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#define PREFERRED_DEBUGGING_TYPE DWARF_DEBUG
+
+/* Override svr[34].h. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ output_file_start (FILE, f_options, sizeof f_options / sizeof f_options[0], \
+ W_options, sizeof W_options / sizeof W_options[0])
+
+/* ix86 abi specified type for wchar_t */
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+
+/* Some machines may desire to change what optimizations are performed for
+ various optimization levels. This macro, if defined, is executed once
+ just after the optimization level is determined and before the remainder
+ of the command options have been parsed. Values set in this macro are
+ used as the default values for the other command line options.
+
+ LEVEL is the optimization level specified; 2 if -O2 is specified,
+ 1 if -O is specified, and 0 if neither is specified. */
+
+/* This macro used to store 0 in flag_signed_bitfields.
+ Not only is that misuse of this macro; the whole idea is wrong.
+
+ The GNU C dialect makes bitfields signed by default,
+ regardless of machine type. Making any machine inconsistent in this
+ regard is bad for portability.
+
+ I chose to make bitfields signed by default because this is consistent
+ with the way ordinary variables are handled: `int' equals `signed int'.
+ If there is a good reason to prefer making bitfields unsigned by default,
+ it cannot have anything to do with the choice of machine.
+ If the reason is good enough, we should change the convention for all machines.
+
+ -- rms, 20 July 1991. */
+
+/*
+ this really should go into dgux-local.h
+*/
+
+#undef OPTIMIZATION_OPTIONS
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) \
+ do { \
+ extern int flag_signed_bitfields; \
+ flag_signed_bitfields = 0; \
+ abort_helper (); \
+ optimization_options (LEVEL,SIZE); \
+ } while (0)
+
+
+/* The normal location of the `ld' and `as' programs */
+
+#undef MD_EXEC_PREFIX
+#define MD_EXEC_PREFIX "/usr/bin/"
+
+/* The normal location of the various *crt*.o files is /usr/lib.  */
+
+#undef MD_STARTFILE_PREFIX
+#define MD_STARTFILE_PREFIX "/usr/lib/"
+
+/* Macros to be automatically defined.
+ __CLASSIFY_TYPE__ is used in the <varargs.h> and <stdarg.h> header
+ files with DG/UX revision 5.40 and later. This allows GNU CC to
+ operate without installing the header files. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -D__ix86 -Dunix -DDGUX -D__CLASSIFY_TYPE__=2\
+ -Asystem(unix) -Asystem(svr4) -Acpu(i386) -Amachine(i386)"
+
+/* If not -ansi, -traditional, or restricting include files to one
+   specific source target, specify full DG/UX features.  */
+#undef CPP_SPEC
+#define CPP_SPEC "%{!ansi:%{!traditional:-D__OPEN_NAMESPACE__}}"
+
+/* Assembler support (legends for mxdb). */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{mno-legend:%{mstandard:-Wc,off}}\
+%{g:%{!mno-legend:-Wc,-fix-bb,-s\"%i\"\
+%{traditional:,-lc}%{!traditional:,-lansi-c}\
+%{mstandard:,-keep-std}\
+%{mexternal-legend:,-external}}}"
+
+/* Override svr4.h. */
+
+/* hassey 3/12/94 keep svr4 ASM_FINAL_SPEC allows -pipe to work */
+
+/* Linker and library specs.
+   -static, -shared, -symbolic, -h* and -z* access AT&T V.4 link options.
+   -svr4 instructs gcc to place /usr/lib/values-X[cat].o on the link line.
+ The absence of -msvr4 indicates linking done in a COFF environment and
+ adds the link script to the link line. In all environments, the first
+ and last objects are crtbegin.o and crtend.o.
+ When the -G link option is used (-shared and -symbolic) a final link is
+ not being done. */
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+"%{!shared:%{!symbolic:-lc}}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{z*} %{h*} %{v:-V} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy} \
+ %{symbolic:-Bsymbolic -G -dy} \
+ %{pg:-L/usr/lib/libp}%{p:-L/usr/lib/libp}"
+
+#ifdef CROSS_COMPILE
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:%{!symbolic:%{pg:gcrt1.o%s} \
+ %{!pg:%{p:mcrt1.o%s} \
+ %{!p:crt1.o%s}}}} \
+ %{pg:gcrti.o%s}%{!pg:crti.o%s} \
+ crtbegin.o%s \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi:%{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o}%{!pg:crtn.o%s}"
+
+#else
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:%{!symbolic:%{pg:gcrt1.o%s} \
+ %{!pg:%{p:/lib/mcrt1.o%s} \
+ %{!p:/lib/crt1.o%s}}} \
+ %{pg:gcrti.o%s}%{!pg:/lib/crti.o%s}} \
+ crtbegin.o%s \
+ %{ansi:/lib/values-Xc.o%s} \
+ %{!ansi:%{traditional:/lib/values-Xt.o%s} \
+ %{!traditional:/lib/values-Xa.o%s}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o}%{!pg:/lib/crtn.o}"
+
+#endif /* CROSS_COMPILE */
+
+#if !defined (no_abort) || defined (CRT_BEGIN) || defined (CRT_END)
+#undef abort
+
+char insn; int insn_; char * file_; int line_;
+#define abort() \
+ (insn_ = (int) insn, \
+ file_ = __FILE__, \
+ line_ = __LINE__, \
+ fancy_abort ())
+#define abort_helper() \
+ do { \
+ extern void abort_aux (); \
+ atexit (abort_aux); \
+ } while (0)
+#define _abort_aux
+#endif /* no abort */
+
+/* The maximum alignment which the object file format can support.
+ page alignment would seem to be enough */
+#undef MAX_OFILE_ALIGNMENT
+#define MAX_OFILE_ALIGNMENT 0x1000
+
+/* Must use data section for relocatable constants when pic. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) \
+{ \
+ if (flag_pic && symbolic_operand (RTX)) \
+ data_section (); \
+ else \
+ const_section (); \
+}
+
+/* This supplements FUNCTION_ARG's definition in i386.h to check
+ TARGET_WARN_PASS_STRUCT */
+
+#undef FUNCTION_ARG
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+((((MODE) == BLKmode && TARGET_WARN_PASS_STRUCT) ? \
+ warning ("argument is a structure"),0 : 0), \
+ (function_arg (&CUM, MODE, TYPE, NAMED)))
+
+/* Add .align 1 to avoid .backalign bug in assembler */
+#undef CONST_SECTION_ASM_OP
+#define CONST_SECTION_ASM_OP ".section\t.rodata\n\t.align 1"
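
Editorial note on the dgux.h hunk above: the FUNCTION_ARG wrapper only fires for
arguments whose mode is BLKmode, i.e. aggregates passed by value.  The following
stand-alone sketch (not part of the patch; all names are invented for
illustration) shows the kind of source that would draw the "argument is a
structure" warning when compiled with the new -mwarn-passed-structs switch:

/* Hypothetical example: a 12-byte struct has no matching integer mode, so
   passing it by value gives a BLKmode argument and, with
   -mwarn-passed-structs, the FUNCTION_ARG wrapper above issues
   "argument is a structure".  */
struct point3 { int x, y, z; };

static int
manhattan (struct point3 p)     /* struct parameter passed by value */
{
  return p.x + p.y + p.z;
}

int
main (void)
{
  struct point3 p = { 3, 4, 5 };
  return manhattan (p) == 12 ? 0 : 1;   /* the call site passes the struct too */
}
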
diff --git a/contrib/gcc/config/i386/freebsd-elf.h b/contrib/gcc/config/i386/freebsd-elf.h
index 393ede7..0a556fe 100644
--- a/contrib/gcc/config/i386/freebsd-elf.h
+++ b/contrib/gcc/config/i386/freebsd-elf.h
@@ -1,8 +1,8 @@
/* Definitions for Intel 386 running FreeBSD with ELF format
- Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1996 Free Software Foundation, Inc.
Contributed by Eric Youngdale.
Modified for stabs-in-ELF by H.J. Lu.
- Adapted from Linux version by John Polstra.
+ Adapted from GNU/Linux version by John Polstra.
This file is part of GNU CC.
@@ -21,31 +21,29 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* A lie, I guess, but the general idea behind FreeBSD/ELF is that we are
- supposed to be outputting something that will assemble under SVr4.
- This gets us pretty close. */
-#include <i386/i386.h> /* Base i386 target machine definitions */
-#include <i386/att.h> /* Use the i386 AT&T assembler syntax */
-#include <linux.h> /* some common stuff */
-
#undef TARGET_VERSION
#define TARGET_VERSION fprintf (stderr, " (i386 FreeBSD/ELF)");
/* The svr4 ABI for the i386 says that records and unions are returned
in memory. */
+/* On FreeBSD, we do not. */
#undef DEFAULT_PCC_STRUCT_RETURN
-#define DEFAULT_PCC_STRUCT_RETURN 1
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* This gets defined in tm.h->linux.h->svr4.h, and keeps us from using
+ libraries compiled with the native cc, so undef it. */
+#undef NO_DOLLAR_IN_LABEL
/* This is how to output an element of a case-vector that is relative.
This is only used for PIC code. See comments by the `casesi' insn in
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
-#define JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
/* Copy this from the svr4 specifications... */
/* Define the register numbers to be used in Dwarf debugging information.
@@ -147,14 +145,10 @@ Boston, MA 02111-1307, USA. */
#define WCHAR_TYPE_SIZE BITS_PER_WORD
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -D__ELF__ -D__FreeBSD__=2 -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Di386 -Dunix -D__ELF__ -D__FreeBSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
#undef CPP_SPEC
-#if TARGET_CPU_DEFAULT == 2
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{!m386:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#else
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{m486:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#endif
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
#undef LIB_SPEC
#if 1
@@ -196,4 +190,16 @@ Boston, MA 02111-1307, USA. */
%{static:-static}}}"
/* Get perform_* macros to build libgcc.a. */
-#include "i386/perform.h"
+
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
+
+ This is used to align code labels according to Intel recommendations. */
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG)!=0) \
+ if ((MAX_SKIP)==0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP))
+#endif
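
A small illustration (not part of the patch) of the assembler text the
ASM_OUTPUT_MAX_SKIP_ALIGN definition above produces, assuming the assembler was
detected with HAVE_GAS_MAX_SKIP_P2ALIGN; the (LOG, MAX_SKIP) values below are
made up for the example:

#include <stdio.h>

/* Mirror of the macro body above, written as a function for clarity.  */
static void
output_max_skip_align (FILE *file, int log, int max_skip)
{
  if (log != 0)
    {
      if (max_skip == 0)
        fprintf (file, "\t.p2align %d\n", log);               /* plain 2**log align */
      else
        fprintf (file, "\t.p2align %d,,%d\n", log, max_skip); /* skip-limited align */
    }
}

int
main (void)
{
  output_max_skip_align (stdout, 4, 7);   /* ".p2align 4,,7": 16-byte boundary,
                                             but only if <= 7 pad bytes are needed */
  output_max_skip_align (stdout, 4, 0);   /* ".p2align 4"                           */
  return 0;
}
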
diff --git a/contrib/gcc/config/i386/freebsd.h b/contrib/gcc/config/i386/freebsd.h
index c4e9991..0a556fe 100644
--- a/contrib/gcc/config/i386/freebsd.h
+++ b/contrib/gcc/config/i386/freebsd.h
@@ -1,7 +1,8 @@
-/* Definitions of target machine for GNU compiler for Intel 80386
- running FreeBSD.
- Copyright (C) 1988, 1992, 1994 Free Software Foundation, Inc.
- Contributed by Poul-Henning Kamp <phk@login.dkuug.dk>
+/* Definitions for Intel 386 running FreeBSD with ELF format
+ Copyright (C) 1996 Free Software Foundation, Inc.
+ Contributed by Eric Youngdale.
+ Modified for stabs-in-ELF by H.J. Lu.
+ Adapted from GNU/Linux version by John Polstra.
This file is part of GNU CC.
@@ -20,231 +21,185 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* This goes away when the math-emulator is fixed */
-#define TARGET_CPU_DEFAULT 0400 /* TARGET_NO_FANCY_MATH_387 */
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 FreeBSD/ELF)");
-/* This is tested by i386gas.h. */
-#define YES_UNDERSCORES
-
-/* Don't assume anything about the header files. */
-#define NO_IMPLICIT_EXTERN_C
-
-#include "i386/gstabs.h"
-
-/* Get perform_* macros to build libgcc.a. */
-#include "i386/perform.h"
-
-#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -D__FreeBSD__ -D__386BSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
-
-/* Like the default, except no -lg. */
-#define LIB_SPEC "%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
-
-#undef SIZE_TYPE
-#define SIZE_TYPE "unsigned int"
-
-#undef PTRDIFF_TYPE
-#define PTRDIFF_TYPE "int"
-
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "short unsigned int"
-
-#define WCHAR_UNSIGNED 1
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 16
-
-#define HAVE_ATEXIT
-
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
-#undef ASM_APP_ON
-#define ASM_APP_ON "#APP\n"
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+/* On FreeBSD, we do not. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
-#undef ASM_APP_OFF
-#define ASM_APP_OFF "#NO_APP\n"
-
-/* The following macros are stolen from i386v4.h */
-/* These have to be defined to get PIC code correct */
+/* This gets defined in tm.h->linux.h->svr4.h, and keeps us from using
+ libraries compiled with the native cc, so undef it. */
+#undef NO_DOLLAR_IN_LABEL
/* This is how to output an element of a case-vector that is relative.
This is only used for PIC code. See comments by the `casesi' insn in
i386.md for an explanation of the expression this outputs. */
-
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* Copy this from the svr4 specifications... */
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+ 0 for %eax (gnu regno = 0)
+ 1 for %ecx (gnu regno = 2)
+ 2 for %edx (gnu regno = 1)
+ 3 for %ebx (gnu regno = 3)
+ 4 for %esp (gnu regno = 7)
+ 5 for %ebp (gnu regno = 6)
+ 6 for %esi (gnu regno = 4)
+ 7 for %edi (gnu regno = 5)
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+ 8 for %eip (no gnu equivalent)
+ 9 for %eflags (no gnu equivalent)
+ 10 for %trapno (no gnu equivalent)
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+ particular, and even for DWARF regno 11, SDB only seems to under-
+ stand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+ 11 for %st(0) (gnu regno = 8)
+ 12 for %st(1) (gnu regno = 9)
+ 13 for %st(2) (gnu regno = 10)
+ 14 for %st(3) (gnu regno = 11)
+ 15 for %st(4) (gnu regno = 12)
+ 16 for %st(5) (gnu regno = 13)
+ 17 for %st(6) (gnu regno = 14)
+ 18 for %st(7) (gnu regno = 15)
+*/
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
-#define JUMP_TABLES_IN_TEXT_SECTION
-
-/* Don't default to pcc-struct-return, because gcc is the only compiler, and
- we want to retain compatibility with older gcc versions. */
-#define DEFAULT_PCC_STRUCT_RETURN 0
-
-/* Profiling routines, partially copied from i386/osfrose.h. */
-
-/* Redefine this to use %eax instead of %edx. */
#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO) \
{ \
if (flag_pic) \
{ \
- fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%eax\n", \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
LPREFIX, (LABELNO)); \
fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
} \
else \
{ \
- fprintf (FILE, "\tmovl $%sP%d,%%eax\n", LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
fprintf (FILE, "\tcall mcount\n"); \
} \
}
-/*
- * Some imports from svr4.h in support of shared libraries.
- * Currently, we need the DECLARE_OBJECT_SIZE stuff.
- */
-
-/* Define the strings used for the special svr4 .type and .size directives.
- These strings generally do not vary from one system running svr4 to
- another, but if a given system (e.g. m88k running svr) needs to use
- different pseudo-op names for these, they may be overridden in the
- file which includes this one. */
-
-#define TYPE_ASM_OP ".type"
-#define SIZE_ASM_OP ".size"
-
-/* The following macro defines the format used to output the second
- operand of the .type assembler directive. Different svr4 assemblers
- expect various different forms for this operand. The one given here
- is just a default. You may need to override it in your machine-
- specific tm.h file (depending upon the particulars of your assembler). */
-
-#define TYPE_OPERAND_FMT "@%s"
-
-/* Write the extra assembler code needed to declare a function's result.
- Most svr4 assemblers don't require any special declaration of the
- result value, but there are exceptions. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -Dunix -D__ELF__ -D__FreeBSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
-#ifndef ASM_DECLARE_RESULT
-#define ASM_DECLARE_RESULT(FILE, RESULT)
-#endif
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
-/* These macros generate the special .type and .size directives which
- are used to set the corresponding fields of the linker symbol table
- entries in an ELF object file under SVR4. These macros also output
- the starting labels for the relevant functions/objects. */
-
-/* Write the extra assembler code needed to declare a function properly.
- Some svr4 assemblers need to also have something extra said about the
- function's return value. We allow for that here. */
-
-#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
- do { \
- fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
- assemble_name (FILE, NAME); \
- putc (',', FILE); \
- fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
- putc ('\n', FILE); \
- ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
- ASM_OUTPUT_LABEL(FILE, NAME); \
- } while (0)
-
-/* Write the extra assembler code needed to declare an object properly. */
-
-#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
- do { \
- fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
- assemble_name (FILE, NAME); \
- putc (',', FILE); \
- fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
- putc ('\n', FILE); \
- size_directive_output = 0; \
- if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
- { \
- size_directive_output = 1; \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, NAME); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
- } \
- ASM_OUTPUT_LABEL(FILE, NAME); \
- } while (0)
-
-/* Output the size directive for a decl in rest_of_decl_compilation
- in the case where we did not do so before the initializer.
- Once we find the error_mark_node, we know that the value of
- size_directive_output was set
- by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
-
-#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
-do { \
- char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
- if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
- && ! AT_END && TOP_LEVEL \
- && DECL_INITIAL (DECL) == error_mark_node \
- && !size_directive_output) \
- { \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, name); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL)));\
- } \
- } while (0)
-
-
-/* This is how to declare the size of a function. */
-
-#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
- do { \
- if (!flag_inhibit_size_directive) \
- { \
- char label[256]; \
- static int labelno; \
- labelno++; \
- ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
- ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, (FNAME)); \
- fprintf (FILE, ","); \
- assemble_name (FILE, label); \
- fprintf (FILE, "-"); \
- assemble_name (FILE, (FNAME)); \
- putc ('\n', FILE); \
- } \
- } while (0)
-
-#define ASM_SPEC " %| %{fpic:-k} %{fPIC:-k}"
-#define LINK_SPEC \
- "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{static:-Bstatic} %{assert*}"
-
-/* This is defined when gcc is compiled in the BSD-directory-tree, and must
- * make up for the gap to all the stuff done in the GNU-makefiles.
+#undef LIB_SPEC
+#if 1
+/* We no longer link with libc_p.a or libg.a by default. If you
+ * want to profile or debug the C library, please add
+ * -lc_p or -ggdb to LDFLAGS at the link time, respectively.
*/
+#define LIB_SPEC \
+ "%{!shared: %{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} \
+ %{!ggdb:-lc} %{ggdb:-lg}}"
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
-#ifdef FREEBSD_NATIVE
+/* Provide a LINK_SPEC appropriate for FreeBSD. Here we provide support
+ for the special GCC options -static and -shared, which allow us to
+ link things in one of these three modes by applying the appropriate
+   combinations of options at link-time.  We would like to support as many
+   of the other GNU linker options here as possible.  But I don't
+ have the time to search for those flags. I am sure how to add
+ support for -soname shared_object_name. H.J.
+
+ I took out %{v:%{!V:-V}}. It is too much :-(. They can use
+ -Wl,-V.
+
+ When the -shared link option is used a final link is not being
+ done. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf_i386 %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld-elf.so.1}} \
+ %{static:-static}}}"
-#define INCLUDE_DEFAULTS { \
- { "/usr/include", 0 }, \
- { "/usr/include/g++", 1 }, \
- { 0, 0} \
- }
+/* Get perform_* macros to build libgcc.a. */
-#undef MD_EXEC_PREFIX
-#define MD_EXEC_PREFIX "/usr/libexec/"
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
-#undef STANDARD_STARTFILE_PREFIX
-#define STANDARD_STARTFILE_PREFIX "/usr/lib"
+ This is used to align code labels according to Intel recommendations. */
-#if 0 /* This is very wrong!!! */
-#define DEFAULT_TARGET_MACHINE "i386-unknown-freebsd_1.0"
-#define GPLUSPLUS_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
-#define TOOL_INCLUDE_DIR "/usr/local/i386-unknown-freebsd_1.0/include"
-#define GCC_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG)!=0) \
+ if ((MAX_SKIP)==0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP))
#endif
-
-#endif /* FREEBSD_NATIVE */
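
To make the new DBX_REGISTER_NUMBER mapping above easier to check against the
long DWARF comment, here is a throwaway sketch (not part of the patch) that
tabulates it; FIRST_STACK_REG is assumed to be 8, as the stack-top-relative
numbering in the comment implies:

#include <stdio.h>

static int
dbx_register_number (int n)
{
  /* Integer registers: same permutation as the macro above.  */
  static const int map[8] = { 0, 2, 1, 3, 6, 7, 5, 4 };
  if (n >= 0 && n < 8)
    return map[n];
  if (n >= 8 && n <= 15)        /* %st(0)..%st(7), assuming FIRST_STACK_REG == 8 */
    return n + 3;               /* -> DWARF 11..18                               */
  return -1;                    /* e.g. the arg pointer has no DWARF number      */
}

int
main (void)
{
  int n;
  for (n = 0; n <= 16; n++)
    printf ("gcc regno %2d -> dwarf regno %2d\n", n, dbx_register_number (n));
  return 0;
}
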
diff --git a/contrib/gcc/config/i386/freebsd.h.fixed b/contrib/gcc/config/i386/freebsd.h.fixed
index c4e9991..0a556fe 100644
--- a/contrib/gcc/config/i386/freebsd.h.fixed
+++ b/contrib/gcc/config/i386/freebsd.h.fixed
@@ -1,7 +1,8 @@
-/* Definitions of target machine for GNU compiler for Intel 80386
- running FreeBSD.
- Copyright (C) 1988, 1992, 1994 Free Software Foundation, Inc.
- Contributed by Poul-Henning Kamp <phk@login.dkuug.dk>
+/* Definitions for Intel 386 running FreeBSD with ELF format
+ Copyright (C) 1996 Free Software Foundation, Inc.
+ Contributed by Eric Youngdale.
+ Modified for stabs-in-ELF by H.J. Lu.
+ Adapted from GNU/Linux version by John Polstra.
This file is part of GNU CC.
@@ -20,231 +21,185 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-/* This goes away when the math-emulator is fixed */
-#define TARGET_CPU_DEFAULT 0400 /* TARGET_NO_FANCY_MATH_387 */
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 FreeBSD/ELF)");
-/* This is tested by i386gas.h. */
-#define YES_UNDERSCORES
-
-/* Don't assume anything about the header files. */
-#define NO_IMPLICIT_EXTERN_C
-
-#include "i386/gstabs.h"
-
-/* Get perform_* macros to build libgcc.a. */
-#include "i386/perform.h"
-
-#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -D__FreeBSD__ -D__386BSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
-
-/* Like the default, except no -lg. */
-#define LIB_SPEC "%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
-
-#undef SIZE_TYPE
-#define SIZE_TYPE "unsigned int"
-
-#undef PTRDIFF_TYPE
-#define PTRDIFF_TYPE "int"
-
-#undef WCHAR_TYPE
-#define WCHAR_TYPE "short unsigned int"
-
-#define WCHAR_UNSIGNED 1
-
-#undef WCHAR_TYPE_SIZE
-#define WCHAR_TYPE_SIZE 16
-
-#define HAVE_ATEXIT
-
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
-#undef ASM_APP_ON
-#define ASM_APP_ON "#APP\n"
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+/* On FreeBSD, we do not. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
-#undef ASM_APP_OFF
-#define ASM_APP_OFF "#NO_APP\n"
-
-/* The following macros are stolen from i386v4.h */
-/* These have to be defined to get PIC code correct */
+/* This gets defined in tm.h->linux.h->svr4.h, and keeps us from using
+ libraries compiled with the native cc, so undef it. */
+#undef NO_DOLLAR_IN_LABEL
/* This is how to output an element of a case-vector that is relative.
This is only used for PIC code. See comments by the `casesi' insn in
i386.md for an explanation of the expression this outputs. */
-
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* Copy this from the svr4 specifications... */
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+ 0 for %eax (gnu regno = 0)
+ 1 for %ecx (gnu regno = 2)
+ 2 for %edx (gnu regno = 1)
+ 3 for %ebx (gnu regno = 3)
+ 4 for %esp (gnu regno = 7)
+ 5 for %ebp (gnu regno = 6)
+ 6 for %esi (gnu regno = 4)
+ 7 for %edi (gnu regno = 5)
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+ 8 for %eip (no gnu equivalent)
+ 9 for %eflags (no gnu equivalent)
+ 10 for %trapno (no gnu equivalent)
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+ particular, and even for DWARF regno 11, SDB only seems to under-
+ stand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+ 11 for %st(0) (gnu regno = 8)
+ 12 for %st(1) (gnu regno = 9)
+ 13 for %st(2) (gnu regno = 10)
+ 14 for %st(3) (gnu regno = 11)
+ 15 for %st(4) (gnu regno = 12)
+ 16 for %st(5) (gnu regno = 13)
+ 17 for %st(6) (gnu regno = 14)
+ 18 for %st(7) (gnu regno = 15)
+*/
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
-#define JUMP_TABLES_IN_TEXT_SECTION
-
-/* Don't default to pcc-struct-return, because gcc is the only compiler, and
- we want to retain compatibility with older gcc versions. */
-#define DEFAULT_PCC_STRUCT_RETURN 0
-
-/* Profiling routines, partially copied from i386/osfrose.h. */
-
-/* Redefine this to use %eax instead of %edx. */
#undef FUNCTION_PROFILER
#define FUNCTION_PROFILER(FILE, LABELNO) \
{ \
if (flag_pic) \
{ \
- fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%eax\n", \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
LPREFIX, (LABELNO)); \
fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
} \
else \
{ \
- fprintf (FILE, "\tmovl $%sP%d,%%eax\n", LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
fprintf (FILE, "\tcall mcount\n"); \
} \
}
-/*
- * Some imports from svr4.h in support of shared libraries.
- * Currently, we need the DECLARE_OBJECT_SIZE stuff.
- */
-
-/* Define the strings used for the special svr4 .type and .size directives.
- These strings generally do not vary from one system running svr4 to
- another, but if a given system (e.g. m88k running svr) needs to use
- different pseudo-op names for these, they may be overridden in the
- file which includes this one. */
-
-#define TYPE_ASM_OP ".type"
-#define SIZE_ASM_OP ".size"
-
-/* The following macro defines the format used to output the second
- operand of the .type assembler directive. Different svr4 assemblers
- expect various different forms for this operand. The one given here
- is just a default. You may need to override it in your machine-
- specific tm.h file (depending upon the particulars of your assembler). */
-
-#define TYPE_OPERAND_FMT "@%s"
-
-/* Write the extra assembler code needed to declare a function's result.
- Most svr4 assemblers don't require any special declaration of the
- result value, but there are exceptions. */
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -Dunix -D__ELF__ -D__FreeBSD__ -Asystem(unix) -Asystem(FreeBSD) -Acpu(i386) -Amachine(i386)"
-#ifndef ASM_DECLARE_RESULT
-#define ASM_DECLARE_RESULT(FILE, RESULT)
-#endif
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
-/* These macros generate the special .type and .size directives which
- are used to set the corresponding fields of the linker symbol table
- entries in an ELF object file under SVR4. These macros also output
- the starting labels for the relevant functions/objects. */
-
-/* Write the extra assembler code needed to declare a function properly.
- Some svr4 assemblers need to also have something extra said about the
- function's return value. We allow for that here. */
-
-#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
- do { \
- fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
- assemble_name (FILE, NAME); \
- putc (',', FILE); \
- fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
- putc ('\n', FILE); \
- ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
- ASM_OUTPUT_LABEL(FILE, NAME); \
- } while (0)
-
-/* Write the extra assembler code needed to declare an object properly. */
-
-#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
- do { \
- fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
- assemble_name (FILE, NAME); \
- putc (',', FILE); \
- fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
- putc ('\n', FILE); \
- size_directive_output = 0; \
- if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
- { \
- size_directive_output = 1; \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, NAME); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
- } \
- ASM_OUTPUT_LABEL(FILE, NAME); \
- } while (0)
-
-/* Output the size directive for a decl in rest_of_decl_compilation
- in the case where we did not do so before the initializer.
- Once we find the error_mark_node, we know that the value of
- size_directive_output was set
- by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
-
-#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
-do { \
- char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
- if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
- && ! AT_END && TOP_LEVEL \
- && DECL_INITIAL (DECL) == error_mark_node \
- && !size_directive_output) \
- { \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, name); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL)));\
- } \
- } while (0)
-
-
-/* This is how to declare the size of a function. */
-
-#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
- do { \
- if (!flag_inhibit_size_directive) \
- { \
- char label[256]; \
- static int labelno; \
- labelno++; \
- ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
- ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
- fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
- assemble_name (FILE, (FNAME)); \
- fprintf (FILE, ","); \
- assemble_name (FILE, label); \
- fprintf (FILE, "-"); \
- assemble_name (FILE, (FNAME)); \
- putc ('\n', FILE); \
- } \
- } while (0)
-
-#define ASM_SPEC " %| %{fpic:-k} %{fPIC:-k}"
-#define LINK_SPEC \
- "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{static:-Bstatic} %{assert*}"
-
-/* This is defined when gcc is compiled in the BSD-directory-tree, and must
- * make up for the gap to all the stuff done in the GNU-makefiles.
+#undef LIB_SPEC
+#if 1
+/* We no longer link with libc_p.a or libg.a by default. If you
+ * want to profile or debug the C library, please add
+ * -lc_p or -ggdb to LDFLAGS at the link time, respectively.
*/
+#define LIB_SPEC \
+ "%{!shared: %{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} \
+ %{!ggdb:-lc} %{ggdb:-lg}}"
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
-#ifdef FREEBSD_NATIVE
+/* Provide a LINK_SPEC appropriate for FreeBSD. Here we provide support
+ for the special GCC options -static and -shared, which allow us to
+ link things in one of these three modes by applying the appropriate
+   combinations of options at link-time.  We would like to support as many
+   of the other GNU linker options here as possible.  But I don't
+ have the time to search for those flags. I am sure how to add
+ support for -soname shared_object_name. H.J.
+
+ I took out %{v:%{!V:-V}}. It is too much :-(. They can use
+ -Wl,-V.
+
+ When the -shared link option is used a final link is not being
+ done. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf_i386 %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld-elf.so.1}} \
+ %{static:-static}}}"
-#define INCLUDE_DEFAULTS { \
- { "/usr/include", 0 }, \
- { "/usr/include/g++", 1 }, \
- { 0, 0} \
- }
+/* Get perform_* macros to build libgcc.a. */
-#undef MD_EXEC_PREFIX
-#define MD_EXEC_PREFIX "/usr/libexec/"
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
-#undef STANDARD_STARTFILE_PREFIX
-#define STANDARD_STARTFILE_PREFIX "/usr/lib"
+ This is used to align code labels according to Intel recommendations. */
-#if 0 /* This is very wrong!!! */
-#define DEFAULT_TARGET_MACHINE "i386-unknown-freebsd_1.0"
-#define GPLUSPLUS_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
-#define TOOL_INCLUDE_DIR "/usr/local/i386-unknown-freebsd_1.0/include"
-#define GCC_INCLUDE_DIR "/usr/local/lib/gcc-lib/i386-unknown-freebsd_1.0/2.5.8/include"
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG)!=0) \
+ if ((MAX_SKIP)==0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP))
#endif
-
-#endif /* FREEBSD_NATIVE */
diff --git a/contrib/gcc/config/i386/gas.h b/contrib/gcc/config/i386/gas.h
index d020157..173bf19 100644
--- a/contrib/gcc/config/i386/gas.h
+++ b/contrib/gcc/config/i386/gas.h
@@ -1,5 +1,5 @@
-/* Definitions for Intel 386 running system V with gnu tools
- Copyright (C) 1988, 1993, 1994 Free Software Foundation, Inc.
+/* Definitions for Intel 386 using GAS.
+ Copyright (C) 1988, 1993, 1994, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -56,8 +56,8 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
-#define CPP_PREDEFINES "-Dunix -Di386 -Asystem(unix) -Acpu(i386) -Amachine(i386)"
-#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
+#define CPP_PREDEFINES "-Dunix"
+#define CPP_SPEC "%(cpp_cpu) %{posix:-D_POSIX_SOURCE}"
/* Allow #sccs in preprocessor. */
@@ -71,27 +71,33 @@ Boston, MA 02111-1307, USA. */
#define TARGET_MEM_FUNCTIONS
-#if 0 /* People say gas uses the log as the arg to .align. */
-/* When using gas, .align N aligns to an N-byte boundary. */
+/* In the past there was confusion as to what the argument to .align was
+ in GAS. For the last several years the rule has been this: for a.out
+ file formats that argument is LOG, and for all other file formats the
+ argument is 1<<LOG.
+   However, GAS now has .p2align and .balign pseudo-ops; to remove any
+   doubt or guesswork, and since this file is used for both a.out and other
+   file formats, we use one of them.  */
+
+#ifdef HAVE_GAS_BALIGN_AND_P2ALIGN
#undef ASM_OUTPUT_ALIGN
-#define ASM_OUTPUT_ALIGN(FILE,LOG) \
- if ((LOG)!=0) fprintf ((FILE), "\t.align %d\n", 1<<(LOG))
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.balign %d\n", 1<<(LOG))
#endif
-/* Align labels, etc. at 4-byte boundaries.
- For the 486, align to 16-byte boundary for sake of cache. */
-
-#undef ASM_OUTPUT_ALIGN_CODE
-#define ASM_OUTPUT_ALIGN_CODE(FILE) \
- fprintf ((FILE), "\t.align %d,0x90\n", i386_align_jumps)
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
-/* Align start of loop at 4-byte boundary. */
-
-#undef ASM_OUTPUT_LOOP_ALIGN
-#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
- fprintf ((FILE), "\t.align %d,0x90\n", i386_align_loops)
+ This is used to align code labels according to Intel recommendations. */
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+# define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG)!=0) \
+ if ((MAX_SKIP)==0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP))
+#endif
/* A C statement or statements which output an assembler instruction
opcode to the stdio stream STREAM. The macro-operand PTR is a
@@ -126,8 +132,8 @@ Boston, MA 02111-1307, USA. */
GAS requires the %cl argument, so override i386/unix.h. */
-#undef AS3_SHIFT_DOUBLE
-#define AS3_SHIFT_DOUBLE(a,b,c,d) AS3 (a,b,c,d)
+#undef SHIFT_DOUBLE_OMITS_COUNT
+#define SHIFT_DOUBLE_OMITS_COUNT 0
/* Print opcodes the way that GAS expects them. */
#define GAS_MNEMONICS 1
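
The gas.h change above switches to .balign precisely to avoid the
a.out-versus-other-format ambiguity the comment describes.  A tiny sketch (not
part of the patch) of the difference for LOG = 4:

#include <stdio.h>

int
main (void)
{
  int log = 4;

  /* Old form: ".align 4" means a 2**4 = 16-byte boundary on a.out targets,
     but a 4-byte boundary on other formats -- ambiguous.  */
  printf ("\t.align %d\n", log);

  /* New form used by ASM_OUTPUT_ALIGN above: ".balign 16" always means a
     16-byte boundary, whatever the object format.  */
  printf ("\t.balign %d\n", 1 << log);
  return 0;
}
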
diff --git a/contrib/gcc/config/i386/gmon-sol2.c b/contrib/gcc/config/i386/gmon-sol2.c
new file mode 100644
index 0000000..35ac1c9
--- /dev/null
+++ b/contrib/gcc/config/i386/gmon-sol2.c
@@ -0,0 +1,409 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * This is a modified gmon.c by J.W.Hawtin <oolon@ankh.org>,
+ * 14/8/96, based on the original gmon.c in GCC and the hacked
+ * Solaris 2 SPARC version (config/sparc/gmon-sol.c) by Mark Eichin, to
+ * do process profiling on Solaris 2.X x86.
+ *
+ * It must be used in conjunction with sol2-gc1.asm, which is used to start
+ * and stop process monitoring.
+ *
+ * Differences.
+ *
+ * On Solaris 2 _mcount is called by library functions not mcount, so support
+ * has been added for both.
+ *
+ * Also the prototype for profil() is different
+ *
+ * Solaris 2 does not seem to have char *minbrk, which allows the setting of
+ * the minimum SBRK region, so this code has been removed; let's pray malloc
+ * does not mess it up.
+ *
+ * Notes
+ *
+ * This code could easily be integrated with the original gmon.c and perhaps
+ * should be.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)gmon.c 5.3 (Berkeley) 5/22/91";
+#endif /* not lint */
+
+#if 0
+#include <unistd.h>
+
+#endif
+#ifdef DEBUG
+#include <stdio.h>
+#endif
+
+#if 0
+#include "i386/gmon.h"
+#else
+
+struct phdr {
+ char *lpc;
+ char *hpc;
+ int ncnt;
+};
+
+
+#define HISTFRACTION 2
+#define HISTCOUNTER unsigned short
+#define HASHFRACTION 1
+#define ARCDENSITY 2
+#define MINARCS 50
+#define BASEADDRESS 0x8000000 /* On Solaris 2 X86 all executables start here
+ and not at 0 */
+
+struct tostruct {
+ char *selfpc;
+ long count;
+ unsigned short link;
+};
+struct rawarc {
+ unsigned long raw_frompc;
+ unsigned long raw_selfpc;
+ long raw_count;
+};
+#define ROUNDDOWN(x,y) (((x)/(y))*(y))
+#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
+#endif
+
+/* char *minbrk; */
+
+#ifdef __alpha
+extern char *sbrk ();
+#endif
+
+ /*
+ * froms is actually a bunch of unsigned shorts indexing tos
+ */
+static int profiling = 3;
+static unsigned short *froms;
+static struct tostruct *tos = 0;
+static long tolimit = 0;
+static char *s_lowpc = 0;
+static char *s_highpc = 0;
+static unsigned long s_textsize = 0;
+
+static int ssiz;
+static char *sbuf;
+static int s_scale;
+ /* see profil(2) where this is described (incorrectly) */
+#define SCALE_1_TO_1 0x10000L
+
+#define MSG "No space for profiling buffer(s)\n"
+
+extern int errno;
+
+monstartup(lowpc, highpc)
+ char *lowpc;
+ char *highpc;
+{
+ int monsize;
+ char *buffer;
+ register int o;
+
+ /*
+ * round lowpc and highpc to multiples of the density we're using
+ * so the rest of the scaling (here and in gprof) stays in ints.
+ */
+ lowpc = (char *)
+ ROUNDDOWN((unsigned)lowpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_lowpc = lowpc;
+ highpc = (char *)
+ ROUNDUP((unsigned)highpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_highpc = highpc;
+ s_textsize = highpc - lowpc;
+ monsize = (s_textsize / HISTFRACTION) + sizeof(struct phdr);
+ buffer = (char *) sbrk( monsize );
+ if ( buffer == (char *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ return;
+ }
+ froms = (unsigned short *) sbrk( s_textsize / HASHFRACTION );
+ if ( froms == (unsigned short *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ return;
+ }
+ tolimit = s_textsize * ARCDENSITY / 100;
+ if ( tolimit < MINARCS ) {
+ tolimit = MINARCS;
+ } else if ( tolimit > 65534 ) {
+ tolimit = 65534;
+ }
+ tos = (struct tostruct *) sbrk( tolimit * sizeof( struct tostruct ) );
+ if ( tos == (struct tostruct *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ tos = 0;
+ return;
+ }
+/* minbrk = (char *) sbrk(0);*/
+ tos[0].link = 0;
+ sbuf = buffer;
+ ssiz = monsize;
+ ( (struct phdr *) buffer ) -> lpc = lowpc;
+ ( (struct phdr *) buffer ) -> hpc = highpc;
+ ( (struct phdr *) buffer ) -> ncnt = ssiz;
+ monsize -= sizeof(struct phdr);
+ if ( monsize <= 0 )
+ return;
+ o = highpc - lowpc;
+ if( monsize < o )
+#ifndef hp300
+ s_scale = ( (float) monsize / o ) * SCALE_1_TO_1;
+#else /* avoid floating point */
+ {
+ int quot = o / monsize;
+
+ if (quot >= 0x10000)
+ s_scale = 1;
+ else if (quot >= 0x100)
+ s_scale = 0x10000 / quot;
+ else if (o >= 0x800000)
+ s_scale = 0x1000000 / (o / (monsize >> 8));
+ else
+ s_scale = 0x1000000 / ((o << 8) / monsize);
+ }
+#endif
+ else
+ s_scale = SCALE_1_TO_1;
+ moncontrol(1);
+}
+
+_mcleanup()
+{
+ int fd;
+ int fromindex;
+ int endfrom;
+ char *frompc;
+ int toindex;
+ struct rawarc rawarc;
+
+ moncontrol(0);
+ fd = creat( "gmon.out" , 0666 );
+ if ( fd < 0 ) {
+ perror( "mcount: gmon.out" );
+ return;
+ }
+# ifdef DEBUG
+ fprintf( stderr , "[mcleanup] sbuf 0x%x ssiz %d\n" , sbuf , ssiz );
+# endif /* DEBUG */
+
+ write( fd , sbuf , ssiz );
+ endfrom = s_textsize / (HASHFRACTION * sizeof(*froms));
+ for ( fromindex = 0 ; fromindex < endfrom ; fromindex++ ) {
+ if ( froms[fromindex] == 0 ) {
+ continue;
+ }
+ frompc = s_lowpc + (fromindex * HASHFRACTION * sizeof(*froms));
+ for (toindex=froms[fromindex]; toindex!=0; toindex=tos[toindex].link) {
+# ifdef DEBUG
+ fprintf( stderr ,
+ "[mcleanup] frompc 0x%x selfpc 0x%x count %d\n" ,
+ frompc , tos[toindex].selfpc , tos[toindex].count );
+# endif /* DEBUG */
+ rawarc.raw_frompc = (unsigned long) frompc;
+ rawarc.raw_selfpc = (unsigned long) tos[toindex].selfpc;
+ rawarc.raw_count = tos[toindex].count;
+ write( fd , &rawarc , sizeof rawarc );
+ }
+ }
+ close( fd );
+}
+
+/* Solaris 2 libraries use _mcount. */
+asm(".globl _mcount; _mcount: jmp internal_mcount");
+/* This is for compatibility with old versions of gcc which used mcount. */
+asm(".globl mcount; mcount: jmp internal_mcount");
+
+internal_mcount()
+{
+ register char *selfpc;
+ register unsigned short *frompcindex;
+ register struct tostruct *top;
+ register struct tostruct *prevtop;
+ register long toindex;
+ static char already_setup;
+
+ /*
+ * find the return address for mcount,
+ * and the return address for mcount's caller.
+ */
+
+ /* selfpc = pc pushed by mcount call.
+ This identifies the function that was just entered. */
+ selfpc = (void *) __builtin_return_address (0);
+ /* frompcindex = pc in preceding frame.
+ This identifies the caller of the function just entered. */
+ frompcindex = (void *) __builtin_return_address (1);
+
+ if(!already_setup) {
+ extern etext();
+ already_setup = 1;
+/* monstartup(0, etext); */
+ monstartup(0x08040000, etext);
+#ifdef USE_ONEXIT
+ on_exit(_mcleanup, 0);
+#else
+ atexit(_mcleanup);
+#endif
+ }
+ /*
+ * check that we are profiling
+ * and that we aren't recursively invoked.
+ */
+ if (profiling) {
+ goto out;
+ }
+ profiling++;
+ /*
+ * check that frompcindex is a reasonable pc value.
+ * for example: signal catchers get called from the stack,
+ * not from text space. too bad.
+ */
+ frompcindex = (unsigned short *)((long)frompcindex - (long)s_lowpc);
+ if ((unsigned long)frompcindex > s_textsize) {
+ goto done;
+ }
+ frompcindex =
+ &froms[((long)frompcindex) / (HASHFRACTION * sizeof(*froms))];
+ toindex = *frompcindex;
+ if (toindex == 0) {
+ /*
+ * first time traversing this arc
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ *frompcindex = toindex;
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = 0;
+ goto done;
+ }
+ top = &tos[toindex];
+ if (top->selfpc == selfpc) {
+ /*
+ * arc at front of chain; usual case.
+ */
+ top->count++;
+ goto done;
+ }
+ /*
+ * have to go looking down chain for it.
+ * top points to what we are looking at,
+ * prevtop points to previous top.
+ * we know it is not at the head of the chain.
+ */
+ for (; /* goto done */; ) {
+ if (top->link == 0) {
+ /*
+ * top is end of the chain and none of the chain
+ * had top->selfpc == selfpc.
+ * so we allocate a new tostruct
+ * and link it to the head of the chain.
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+ /*
+ * otherwise, check the next arc on the chain.
+ */
+ prevtop = top;
+ top = &tos[top->link];
+ if (top->selfpc == selfpc) {
+ /*
+ * there it is.
+ * increment its count
+ * move it to the head of the chain.
+ */
+ top->count++;
+ toindex = prevtop->link;
+ prevtop->link = top->link;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+
+ }
+done:
+ profiling--;
+ /* and fall through */
+out:
+ return; /* normal return restores saved registers */
+
+overflow:
+ profiling++; /* halt further profiling */
+# define TOLIMIT "mcount: tos overflow\n"
+ write(2, TOLIMIT, sizeof(TOLIMIT));
+ goto out;
+}
+
+/*
+ * Control profiling
+ * profiling is what mcount checks to see if
+ * all the data structures are ready.
+ */
+moncontrol(mode)
+ int mode;
+{
+ if (mode)
+ {
+ /* start */
+ profil((unsigned short *)(sbuf + sizeof(struct phdr)),
+ ssiz - sizeof(struct phdr),
+ (int)s_lowpc, s_scale);
+
+ profiling = 0;
+ } else {
+ /* stop */
+ profil((unsigned short *)0, 0, 0, 0);
+ profiling = 3;
+ }
+}
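
The selfpc/frompcindex comments in internal_mcount above are the heart of the
arc recording.  Here is a stand-alone sketch (not part of the patch) of how the
same caller/callee pair can be recovered with __builtin_return_address; like
the real code it relies on frame pointers, so compile with
-fno-omit-frame-pointer:

#include <stdio.h>

/* Stand-in for the compiler-inserted mcount call: level 0 is the resume
   address inside the function that called us ("selfpc"), level 1 is the
   resume address inside that function's caller ("frompc").  */
static void
trace_hook (void)
{
  void *selfpc = __builtin_return_address (0);
  void *frompc = __builtin_return_address (1);
  printf ("arc %p -> %p\n", frompc, selfpc);
}

static void
profiled_function (void)
{
  trace_hook ();              /* gcc -pg would emit a call to mcount here */
}

int
main (void)
{
  profiled_function ();
  return 0;
}
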
diff --git a/contrib/gcc/config/i386/gnu.h b/contrib/gcc/config/i386/gnu.h
index 1ad5df9..971a5f8 100644
--- a/contrib/gcc/config/i386/gnu.h
+++ b/contrib/gcc/config/i386/gnu.h
@@ -4,16 +4,19 @@
#include <i386/linux.h>
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES GNU_CPP_PREDEFINES("i386")
+#define CPP_PREDEFINES "-Di386 -Acpu(i386) -Amachine(i386) \
+-Dunix -Asystem(unix) -DMACH -Asystem(mach) -D__GNU__ -Asystem(gnu)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 GNU)");
#undef LINK_SPEC
#define LINK_SPEC "-m elf_i386 %{shared:-shared} \
%{!shared: \
- %{!ibcs: \
- %{!static: \
- %{rdynamic:-export-dynamic} \
- %{!dynamic-linker:-dynamic-linker /lib/ld.so} \
- %{!rpath:-rpath /lib/}} %{static:-static}}}"
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld.so}} \
+ %{static:-static}}"
/* Get machine-independent configuration parameters for the GNU system. */
diff --git a/contrib/gcc/config/i386/go32.h b/contrib/gcc/config/i386/go32.h
index 5618a0d..dd03cc8 100644
--- a/contrib/gcc/config/i386/go32.h
+++ b/contrib/gcc/config/i386/go32.h
@@ -1,5 +1,7 @@
/* Configuration for an i386 running MS-DOS with djgpp/go32. */
+#include "dbxcoff.h"
+
/* Don't assume anything about the header files. */
#define NO_IMPLICIT_EXTERN_C
@@ -53,7 +55,13 @@ dtor_section () \
fprintf (FILE, "\n"); \
} while (0)
-#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+/* Allow (e.g.) __attribute__((section ("locked"))) to work */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC)\
+ do { \
+ fprintf (FILE, "\t.section %s\n", NAME); \
+ } while (0)
+
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
do { \
dtor_section (); \
fprintf (FILE, "%s\t", ASM_LONG); \
@@ -61,4 +69,28 @@ dtor_section () \
fprintf (FILE, "\n"); \
} while (0)
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+/* Use the main_input_filename instead of dump_base_name */
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ } while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) fprintf ((FILE), "\t.p2align %d\n", LOG)
+
+/* djgpp has atexit (). */
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+/* djgpp automatically calls its own version of __main, so don't define one
+ in libgcc, nor call one in main(). */
+#define HAS_INIT_SECTION
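
The ASM_OUTPUT_SECTION_NAME hook added above exists so that GCC's section
attribute works under djgpp.  A minimal, hypothetical use (the section name is
just the example echoed from the comment in the patch):

#include <stdio.h>

static int counter = 0;

/* Place this function's code in a section named "locked" instead of .text;
   the go32.h hook above emits the corresponding ".section locked" directive. */
void __attribute__ ((section ("locked")))
tick (void)
{
  counter++;
}

int
main (void)
{
  tick ();
  printf ("counter = %d\n", counter);
  return 0;
}
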
diff --git a/contrib/gcc/config/i386/i386.c b/contrib/gcc/config/i386/i386.c
index 48c58a0..b2512a0 100644
--- a/contrib/gcc/config/i386/i386.c
+++ b/contrib/gcc/config/i386/i386.c
@@ -1,5 +1,5 @@
/* Subroutines for insn-output.c for Intel X86.
- Copyright (C) 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1988, 92, 94-98, 1999 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -16,12 +16,11 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
+Boston, MA 02111-1307, USA. */
-#include <stdio.h>
#include <setjmp.h>
-#include <ctype.h>
#include "config.h"
+#include "system.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -33,7 +32,11 @@ Boston, MA 02111-1307, USA. */
#include "insn-attr.h"
#include "tree.h"
#include "flags.h"
+#include "except.h"
#include "function.h"
+#include "recog.h"
+#include "expr.h"
+#include "toplev.h"
#ifdef EXTRA_CONSTRAINT
/* If EXTRA_CONSTRAINT is defined, then the 'S'
@@ -44,11 +47,69 @@ Boston, MA 02111-1307, USA. */
even if the conditional was untrue. */
#endif
-#define AT_BP(mode) (gen_rtx (MEM, (mode), frame_pointer_rtx))
+#ifndef CHECK_STACK_LIMIT
+#define CHECK_STACK_LIMIT -1
+#endif
+
+/* Type of an operand for ix86_{binary,unary}_operator_ok */
+enum reg_mem
+{
+ reg_p,
+ mem_p,
+ imm_p
+};
+
+/* Processor costs (relative to an add) */
+struct processor_costs i386_cost = { /* 386 specific costs */
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 3, /* variable shift costs */
+ 2, /* constant shift costs */
+ 6, /* cost of starting a multiply */
+ 1, /* cost of multiply per each bit set */
+ 23 /* cost of a divide/mod */
+};
+
+struct processor_costs i486_cost = { /* 486 specific costs */
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 3, /* variable shift costs */
+ 2, /* constant shift costs */
+ 12, /* cost of starting a multiply */
+ 1, /* cost of multiply per each bit set */
+ 40 /* cost of a divide/mod */
+};
+
+struct processor_costs pentium_cost = {
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 4, /* variable shift costs */
+ 1, /* constant shift costs */
+ 11, /* cost of starting a multiply */
+ 0, /* cost of multiply per each bit set */
+ 25 /* cost of a divide/mod */
+};
+
+struct processor_costs pentiumpro_cost = {
+ 1, /* cost of an add instruction */
+ 1, /* cost of a lea instruction */
+ 3, /* variable shift costs */
+ 1, /* constant shift costs */
+ 4, /* cost of starting a multiply */
+ 0, /* cost of multiply per each bit set */
+ 17 /* cost of a divide/mod */
+};
+
+struct processor_costs *ix86_cost = &pentium_cost;
+
+#define AT_BP(mode) (gen_rtx_MEM ((mode), frame_pointer_rtx))
extern FILE *asm_out_file;
extern char *strcat ();
+static void ix86_epilogue PROTO((int));
+static void ix86_prologue PROTO((int));
+
char *singlemove_string ();
char *output_move_const_single ();
char *output_fp_cc0_set ();
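
One way to read the new cost tables above (an illustration, not code from the
patch): costs are in units of an add, and the multiply entries combine "cost of
starting a multiply" with "cost of multiply per each bit set", which matters
when costing a multiply by a constant.  Under that reading:

#include <stdio.h>

/* Values copied from the i386_cost and pentiumpro_cost tables above.  */
#define I386_MULT_INIT     6
#define I386_MULT_PER_BIT  1
#define PPRO_MULT_INIT     4
#define PPRO_MULT_PER_BIT  0

static int
popcount (unsigned int x)
{
  int n = 0;
  while (x)
    {
      n += x & 1;
      x >>= 1;
    }
  return n;
}

int
main (void)
{
  unsigned int k = 10;                 /* multiply by the constant 10 */
  int bits = popcount (k);             /* two bits set                */

  printf ("386  cost of *%u: %d adds\n", k,
          I386_MULT_INIT + I386_MULT_PER_BIT * bits);
  printf ("ppro cost of *%u: %d adds\n", k,
          PPRO_MULT_INIT + PPRO_MULT_PER_BIT * bits);
  return 0;
}
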
@@ -68,7 +129,7 @@ enum reg_class regclass_map[FIRST_PSEUDO_REGISTER] =
SIREG, DIREG, INDEX_REGS, GENERAL_REGS,
/* FP registers */
FP_TOP_REG, FP_SECOND_REG, FLOAT_REGS, FLOAT_REGS,
- FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
+ FLOAT_REGS, FLOAT_REGS, FLOAT_REGS, FLOAT_REGS,
/* arg pointer */
INDEX_REGS
};
@@ -80,24 +141,48 @@ struct rtx_def *i386_compare_op0 = NULL_RTX;
struct rtx_def *i386_compare_op1 = NULL_RTX;
struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
+/* which cpu are we scheduling for */
+enum processor_type ix86_cpu;
+
+/* which instruction set architecture to use. */
+int ix86_arch;
+
+/* Strings to hold which cpu and instruction set architecture to use. */
+char *ix86_cpu_string; /* for -mcpu=<xxx> */
+char *ix86_arch_string; /* for -march=<xxx> */
+
/* Register allocation order */
char *i386_reg_alloc_order;
static char regs_allocated[FIRST_PSEUDO_REGISTER];
/* # of registers to use to pass arguments. */
-char *i386_regparm_string; /* # registers to use to pass args */
-int i386_regparm; /* i386_regparm_string as a number */
+char *i386_regparm_string;
-/* Alignment to use for loops and jumps */
-char *i386_align_loops_string; /* power of two alignment for loops */
-char *i386_align_jumps_string; /* power of two alignment for non-loop jumps */
-char *i386_align_funcs_string; /* power of two alignment for functions */
+/* i386_regparm_string as a number */
+int i386_regparm;
-int i386_align_loops; /* power of two alignment for loops */
-int i386_align_jumps; /* power of two alignment for non-loop jumps */
-int i386_align_funcs; /* power of two alignment for functions */
+/* Alignment to use for loops and jumps: */
+
+/* Power of two alignment for loops. */
+char *i386_align_loops_string;
+
+/* Power of two alignment for non-loop jumps. */
+char *i386_align_jumps_string;
+
+/* Values 1-5: see jump.c */
+int i386_branch_cost;
+char *i386_branch_cost_string;
+
+/* Power of two alignment for functions. */
+int i386_align_funcs;
+char *i386_align_funcs_string;
+
+/* Power of two alignment for loops. */
+int i386_align_loops;
+
+/* Power of two alignment for non-loop jumps. */
+int i386_align_jumps;
-
/* Sometimes certain combinations of command options do not make
sense on a particular target machine. You can define a macro
`OVERRIDE_OPTIONS' to take account of this. This macro, if
@@ -110,19 +195,39 @@ int i386_align_funcs; /* power of two alignment for functions */
void
override_options ()
{
- int ch, i, regno;
- char *p;
+ int ch, i, j;
int def_align;
+ static struct ptt
+ {
+ char *name; /* Canonical processor name. */
+ enum processor_type processor; /* Processor type enum value. */
+ struct processor_costs *cost; /* Processor costs */
+ int target_enable; /* Target flags to enable. */
+ int target_disable; /* Target flags to disable. */
+ } processor_target_table[]
+ = {{PROCESSOR_I386_STRING, PROCESSOR_I386, &i386_cost, 0, 0},
+ {PROCESSOR_I486_STRING, PROCESSOR_I486, &i486_cost, 0, 0},
+ {PROCESSOR_I586_STRING, PROCESSOR_PENTIUM, &pentium_cost, 0, 0},
+ {PROCESSOR_PENTIUM_STRING, PROCESSOR_PENTIUM, &pentium_cost, 0, 0},
+ {PROCESSOR_I686_STRING, PROCESSOR_PENTIUMPRO, &pentiumpro_cost,
+ 0, 0},
+ {PROCESSOR_PENTIUMPRO_STRING, PROCESSOR_PENTIUMPRO,
+ &pentiumpro_cost, 0, 0}};
+
+ int ptt_size = sizeof (processor_target_table) / sizeof (struct ptt);
+
#ifdef SUBTARGET_OVERRIDE_OPTIONS
SUBTARGET_OVERRIDE_OPTIONS;
#endif
- /* Validate registers in register allocation order */
+ /* Validate registers in register allocation order. */
if (i386_reg_alloc_order)
{
for (i = 0; (ch = i386_reg_alloc_order[i]) != '\0'; i++)
{
+ int regno = 0;
+
switch (ch)
{
case 'a': regno = 0; break;
@@ -137,23 +242,75 @@ override_options ()
}
if (regs_allocated[regno])
- fatal ("Register '%c' was already specified in the allocation order", ch);
+ fatal ("Register '%c' already specified in allocation order", ch);
regs_allocated[regno] = 1;
}
}
- /* Validate -mregparm= value */
+ if (ix86_arch_string == 0)
+ {
+ ix86_arch_string = PROCESSOR_PENTIUM_STRING;
+ if (ix86_cpu_string == 0)
+ ix86_cpu_string = PROCESSOR_DEFAULT_STRING;
+ }
+
+ for (i = 0; i < ptt_size; i++)
+ if (! strcmp (ix86_arch_string, processor_target_table[i].name))
+ {
+ ix86_arch = processor_target_table[i].processor;
+ if (ix86_cpu_string == 0)
+ ix86_cpu_string = processor_target_table[i].name;
+ break;
+ }
+
+ if (i == ptt_size)
+ {
+ error ("bad value (%s) for -march= switch", ix86_arch_string);
+ ix86_arch_string = PROCESSOR_PENTIUM_STRING;
+ ix86_arch = PROCESSOR_DEFAULT;
+ }
+
+ if (ix86_cpu_string == 0)
+ ix86_cpu_string = PROCESSOR_DEFAULT_STRING;
+
+ for (j = 0; j < ptt_size; j++)
+ if (! strcmp (ix86_cpu_string, processor_target_table[j].name))
+ {
+ ix86_cpu = processor_target_table[j].processor;
+ ix86_cost = processor_target_table[j].cost;
+ if (i > j && (int) ix86_arch >= (int) PROCESSOR_PENTIUMPRO)
+ error ("-mcpu=%s does not support -march=%s",
+ ix86_cpu_string, ix86_arch_string);
+
+ target_flags |= processor_target_table[j].target_enable;
+ target_flags &= ~processor_target_table[j].target_disable;
+ break;
+ }
+
+ if (j == ptt_size)
+ {
+ error ("bad value (%s) for -mcpu= switch", ix86_cpu_string);
+ ix86_cpu_string = PROCESSOR_DEFAULT_STRING;
+ ix86_cpu = PROCESSOR_DEFAULT;
+ }
+
+ /* Validate -mregparm= value. */
if (i386_regparm_string)
{
i386_regparm = atoi (i386_regparm_string);
if (i386_regparm < 0 || i386_regparm > REGPARM_MAX)
- fatal ("-mregparm=%d is not between 0 and %d", i386_regparm, REGPARM_MAX);
+ fatal ("-mregparm=%d is not between 0 and %d",
+ i386_regparm, REGPARM_MAX);
}
- def_align = (TARGET_386) ? 2 : 4;
+ /* The 486 suffers more from non-aligned cache line fills, and the
+ larger code size results in a larger cache foot-print and more misses.
+ The 486 has a 16 byte cache line, pentium and pentiumpro have a 32 byte
+ cache line. */
+ def_align = (TARGET_486) ? 4 : 2;
- /* Validate -malign-loops= value, or provide default */
+ /* Validate -malign-loops= value, or provide default. */
if (i386_align_loops_string)
{
i386_align_loops = atoi (i386_align_loops_string);
@@ -162,9 +319,13 @@ override_options ()
i386_align_loops, MAX_CODE_ALIGN);
}
else
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ i386_align_loops = 4;
+#else
i386_align_loops = 2;
+#endif
- /* Validate -malign-jumps= value, or provide default */
+ /* Validate -malign-jumps= value, or provide default. */
if (i386_align_jumps_string)
{
i386_align_jumps = atoi (i386_align_jumps_string);
@@ -173,9 +334,13 @@ override_options ()
i386_align_jumps, MAX_CODE_ALIGN);
}
else
+#ifdef ASM_OUTPUT_MAX_SKIP_ALIGN
+ i386_align_jumps = 4;
+#else
i386_align_jumps = def_align;
+#endif
- /* Validate -malign-functions= value, or provide default */
+ /* Validate -malign-functions= value, or provide default. */
if (i386_align_funcs_string)
{
i386_align_funcs = atoi (i386_align_funcs_string);
@@ -185,6 +350,21 @@ override_options ()
}
else
i386_align_funcs = def_align;
+
+ /* Validate -mbranch-cost= value, or provide default. */
+ if (i386_branch_cost_string)
+ {
+ i386_branch_cost = atoi (i386_branch_cost_string);
+ if (i386_branch_cost < 0 || i386_branch_cost > 5)
+ fatal ("-mbranch-cost=%d is not between 0 and 5",
+ i386_branch_cost);
+ }
+ else
+ i386_branch_cost = 1;
+
+ /* Keep nonleaf frame pointers. */
+ if (TARGET_OMIT_LEAF_FRAME_POINTER)
+ flag_omit_frame_pointer = 1;
}
/* A C statement (sans semicolon) to choose the order in which to
@@ -203,13 +383,16 @@ override_options ()
void
order_regs_for_local_alloc ()
{
- int i, ch, order, regno;
+ int i, ch, order;
+
+ /* User specified the register allocation order. */
- /* User specified the register allocation order */
if (i386_reg_alloc_order)
{
for (i = order = 0; (ch = i386_reg_alloc_order[i]) != '\0'; i++)
{
+ int regno = 0;
+
switch (ch)
{
case 'a': regno = 0; break;
@@ -226,59 +409,116 @@ order_regs_for_local_alloc ()
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
{
- if (!regs_allocated[i])
+ if (! regs_allocated[i])
reg_alloc_order[order++] = i;
}
}
- /* If users did not specify a register allocation order, favor eax
- normally except if DImode variables are used, in which case
- favor edx before eax, which seems to cause less spill register
- not found messages. */
+ /* If user did not specify a register allocation order, use natural order. */
else
{
- rtx insn;
-
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
reg_alloc_order[i] = i;
+ }
+}
+
+void
+optimization_options (level, size)
+ int level;
+ int size ATTRIBUTE_UNUSED;
+{
+ /* For -O2 and beyond, turn off -fschedule-insns by default. It tends to
+ make the problem with not enough registers even worse. */
+#ifdef INSN_SCHEDULING
+ if (level > 1)
+ flag_schedule_insns = 0;
+#endif
+}
+
+/* Sign-extend a 16-bit constant */
- if (optimize)
- {
- int use_dca = FALSE;
+struct rtx_def *
+i386_sext16_if_const (op)
+ struct rtx_def *op;
+{
+ if (GET_CODE (op) == CONST_INT)
+ {
+ HOST_WIDE_INT val = INTVAL (op);
+ HOST_WIDE_INT sext_val;
+ if (val & 0x8000)
+ sext_val = val | ~0xffff;
+ else
+ sext_val = val & 0xffff;
+ if (sext_val != val)
+ op = GEN_INT (sext_val);
+ }
+ return op;
+}
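i386_sext16_if_const folds a CONST_INT down to its value as a signed 16-bit quantity. The same bit manipulation on a plain integer, as a self-contained sketch (no rtx machinery; the helper name is made up):

    #include <stdio.h>

    /* Sign-extend the low 16 bits of VAL, mirroring the CONST_INT case
       in i386_sext16_if_const.  */
    static long sext16 (long val)
    {
      if (val & 0x8000)
        return val | ~0xffffL;
      return val & 0xffff;
    }

    int main (void)
    {
      printf ("%ld\n", sext16 (0x7fff));   /*  32767 */
      printf ("%ld\n", sext16 (0x8000));   /* -32768 */
      printf ("%ld\n", sext16 (0x12345));  /* 9029 (0x2345) */
      return 0;
    }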
+
+/* Return nonzero if the rtx is aligned */
- for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
- {
- if (GET_CODE (insn) == INSN)
- {
- rtx set = NULL_RTX;
- rtx pattern = PATTERN (insn);
-
- if (GET_CODE (pattern) == SET)
- set = pattern;
-
- else if ((GET_CODE (pattern) == PARALLEL
- || GET_CODE (pattern) == SEQUENCE)
- && GET_CODE (XVECEXP (pattern, 0, 0)) == SET)
- set = XVECEXP (pattern, 0, 0);
-
- if (set && GET_MODE (SET_SRC (set)) == DImode)
- {
- use_dca = TRUE;
- break;
- }
- }
- }
+static int
+i386_aligned_reg_p (regno)
+ int regno;
+{
+ return (regno == STACK_POINTER_REGNUM
+ || (! flag_omit_frame_pointer && regno == FRAME_POINTER_REGNUM));
+}
- if (use_dca)
- {
- reg_alloc_order[0] = 1; /* edx */
- reg_alloc_order[1] = 2; /* ecx */
- reg_alloc_order[2] = 0; /* eax */
- }
- }
+int
+i386_aligned_p (op)
+ rtx op;
+{
+ /* Registers and immediate operands are always "aligned". */
+ if (GET_CODE (op) != MEM)
+ return 1;
+
+ /* Don't even try to do any aligned optimizations with volatiles. */
+ if (MEM_VOLATILE_P (op))
+ return 0;
+
+ /* Get address of memory operand. */
+ op = XEXP (op, 0);
+
+ switch (GET_CODE (op))
+ {
+ case CONST_INT:
+ if (INTVAL (op) & 3)
+ break;
+ return 1;
+
+ /* Match "reg + offset" */
+ case PLUS:
+ if (GET_CODE (XEXP (op, 1)) != CONST_INT)
+ break;
+ if (INTVAL (XEXP (op, 1)) & 3)
+ break;
+
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != REG)
+ break;
+
+ /* ... fall through ... */
+
+ case REG:
+ return i386_aligned_reg_p (REGNO (op));
+
+ default:
+ break;
}
+
+ return 0;
}
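i386_aligned_p answers "can this operand be assumed 4-byte aligned?": registers and immediates always qualify, volatile memory never does, and a memory address qualifies when it is a multiple-of-4 constant or an "aligned register + multiple-of-4 offset", where only the stack pointer (and the frame pointer when it is kept) count as aligned registers. A minimal sketch of the reg+offset case on raw values; the helper is hypothetical:

    #include <stdio.h>

    /* Mirror of the "reg + offset" case of i386_aligned_p: assume 4-byte
       alignment only when the offset is a multiple of 4 and the base is a
       register the ABI keeps aligned (stack or frame pointer).  */
    static int addr_aligned_p (int base_is_sp_or_fp, long offset)
    {
      if (offset & 3)
        return 0;
      return base_is_sp_or_fp;
    }

    int main (void)
    {
      printf ("%d\n", addr_aligned_p (1, 8));  /* 1: %esp + 8 */
      printf ("%d\n", addr_aligned_p (1, 6));  /* 0: odd offset */
      printf ("%d\n", addr_aligned_p (0, 8));  /* 0: arbitrary base register */
      return 0;
    }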
+
+/* Return nonzero if INSN looks like it won't compute useful cc bits
+ as a side effect. This information is only a hint. */
+int
+i386_cc_probably_useless_p (insn)
+ rtx insn;
+{
+ return ! next_cc0_user (insn);
+}
/* Return nonzero if IDENTIFIER with arguments ARGS is a valid machine specific
attribute for DECL. The attributes in ATTRIBUTES have previously been
@@ -286,10 +526,10 @@ order_regs_for_local_alloc ()
int
i386_valid_decl_attribute_p (decl, attributes, identifier, args)
- tree decl;
- tree attributes;
- tree identifier;
- tree args;
+ tree decl ATTRIBUTE_UNUSED;
+ tree attributes ATTRIBUTE_UNUSED;
+ tree identifier ATTRIBUTE_UNUSED;
+ tree args ATTRIBUTE_UNUSED;
{
return 0;
}
@@ -301,11 +541,12 @@ i386_valid_decl_attribute_p (decl, attributes, identifier, args)
int
i386_valid_type_attribute_p (type, attributes, identifier, args)
tree type;
- tree attributes;
+ tree attributes ATTRIBUTE_UNUSED;
tree identifier;
tree args;
{
if (TREE_CODE (type) != FUNCTION_TYPE
+ && TREE_CODE (type) != METHOD_TYPE
&& TREE_CODE (type) != FIELD_DECL
&& TREE_CODE (type) != TYPE_DECL)
return 0;
@@ -315,17 +556,17 @@ i386_valid_type_attribute_p (type, attributes, identifier, args)
if (is_attribute_p ("stdcall", identifier))
return (args == NULL_TREE);
- /* Cdecl attribute says the callee is a normal C declaration */
+ /* Cdecl attribute says the callee is a normal C declaration. */
if (is_attribute_p ("cdecl", identifier))
return (args == NULL_TREE);
/* Regparm attribute specifies how many integer arguments are to be
- passed in registers */
+ passed in registers. */
if (is_attribute_p ("regparm", identifier))
{
tree cst;
- if (!args || TREE_CODE (args) != TREE_LIST
+ if (! args || TREE_CODE (args) != TREE_LIST
|| TREE_CHAIN (args) != NULL_TREE
|| TREE_VALUE (args) == NULL_TREE)
return 0;
@@ -351,8 +592,8 @@ i386_valid_type_attribute_p (type, attributes, identifier, args)
int
i386_comp_type_attributes (type1, type2)
- tree type1;
- tree type2;
+ tree type1 ATTRIBUTE_UNUSED;
+ tree type2 ATTRIBUTE_UNUSED;
{
return 1;
}
@@ -381,30 +622,27 @@ i386_return_pops_args (fundecl, funtype, size)
tree funtype;
int size;
{
- int rtd = TARGET_RTD;
-
- if (TREE_CODE (funtype) == IDENTIFIER_NODE)
- return 0;
+ int rtd = TARGET_RTD && (!fundecl || TREE_CODE (fundecl) != IDENTIFIER_NODE);
- /* Cdecl functions override -mrtd, and never pop the stack */
- if (lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype)))
- return 0;
+ /* Cdecl functions override -mrtd, and never pop the stack. */
+ if (! lookup_attribute ("cdecl", TYPE_ATTRIBUTES (funtype))) {
- /* Stdcall functions will pop the stack if not variable args */
- if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)))
- rtd = 1;
+ /* Stdcall functions will pop the stack if not variable args. */
+ if (lookup_attribute ("stdcall", TYPE_ATTRIBUTES (funtype)))
+ rtd = 1;
- if (rtd)
- {
- if (TYPE_ARG_TYPES (funtype) == NULL_TREE
- || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype))) == void_type_node))
- return size;
+ if (rtd
+ && (TYPE_ARG_TYPES (funtype) == NULL_TREE
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (funtype)))
+ == void_type_node)))
+ return size;
+ }
- if (aggregate_value_p (TREE_TYPE (funtype)))
- return GET_MODE_SIZE (Pmode);
- }
+ /* Lose any fake structure return argument. */
+ if (aggregate_value_p (TREE_TYPE (funtype)))
+ return GET_MODE_SIZE (Pmode);
- return 0;
+ return 0;
}
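The rewritten i386_return_pops_args gathers the callee-pop rules in one place: cdecl never pops and overrides -mrtd, stdcall (or -mrtd) pops the argument bytes unless the function is variadic, and the hidden pointer for an aggregate return is always popped. A condensed sketch of that decision table; the flags stand in for the attribute and tree checks and are assumptions of this sketch:

    #include <stdio.h>

    /* Bytes the callee pops on return, following i386_return_pops_args:
       SIZE bytes for a non-variadic stdcall/-mrtd function, 4 bytes
       (GET_MODE_SIZE (Pmode)) for the fake aggregate-return pointer,
       0 for plain cdecl.  */
    static int callee_pop_bytes (int size, int is_cdecl, int is_stdcall,
                                 int mrtd, int is_variadic, int returns_aggregate)
    {
      if (!is_cdecl && (mrtd || is_stdcall) && !is_variadic)
        return size;
      if (returns_aggregate)
        return 4;
      return 0;
    }

    int main (void)
    {
      printf ("%d\n", callee_pop_bytes (12, 0, 1, 0, 0, 0)); /* stdcall: 12 */
      printf ("%d\n", callee_pop_bytes (12, 1, 0, 1, 0, 0)); /* cdecl beats -mrtd: 0 */
      printf ("%d\n", callee_pop_bytes (12, 0, 1, 0, 1, 0)); /* variadic stdcall: 0 */
      return 0;
    }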
@@ -416,7 +654,7 @@ i386_return_pops_args (fundecl, funtype, size)
void
init_cumulative_args (cum, fntype, libname)
- CUMULATIVE_ARGS *cum; /* argument info to initialize */
+ CUMULATIVE_ARGS *cum; /* Argument info to initialize */
tree fntype; /* tree ptr for function decl */
rtx libname; /* SYMBOL_REF of library name or 0 */
{
@@ -427,12 +665,9 @@ init_cumulative_args (cum, fntype, libname)
{
fprintf (stderr, "\ninit_cumulative_args (");
if (fntype)
- {
- tree ret_type = TREE_TYPE (fntype);
- fprintf (stderr, "fntype code = %s, ret code = %s",
- tree_code_name[ (int)TREE_CODE (fntype) ],
- tree_code_name[ (int)TREE_CODE (ret_type) ]);
- }
+ fprintf (stderr, "fntype code = %s, ret code = %s",
+ tree_code_name[(int) TREE_CODE (fntype)],
+ tree_code_name[(int) TREE_CODE (TREE_TYPE (fntype))]);
else
fprintf (stderr, "no fntype");
@@ -447,6 +682,7 @@ init_cumulative_args (cum, fntype, libname)
if (fntype)
{
tree attr = lookup_attribute ("regparm", TYPE_ATTRIBUTES (fntype));
+
if (attr)
cum->nregs = TREE_INT_CST_LOW (TREE_VALUE (TREE_VALUE (attr)));
}
@@ -459,11 +695,10 @@ init_cumulative_args (cum, fntype, libname)
if (cum->nregs)
{
for (param = (fntype) ? TYPE_ARG_TYPES (fntype) : 0;
- param != (tree)0;
- param = next_param)
+ param != 0; param = next_param)
{
next_param = TREE_CHAIN (param);
- if (next_param == (tree)0 && TREE_VALUE (param) != void_type_node)
+ if (next_param == 0 && TREE_VALUE (param) != void_type_node)
cum->nregs = 0;
}
}
@@ -485,12 +720,13 @@ function_arg_advance (cum, mode, type, named)
tree type; /* type of the argument or 0 if lib support */
int named; /* whether or not the argument was named */
{
- int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ int bytes
+ = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
if (TARGET_DEBUG_ARG)
fprintf (stderr,
- "function_adv( size=%d, words=%2d, nregs=%d, mode=%4s, named=%d )\n\n",
+ "function_adv (sz=%d, wds=%2d, nregs=%d, mode=%s, named=%d)\n\n",
words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
cum->words += words;
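The `words` computation in this hunk is the usual round-up division: an argument of `bytes` bytes consumes ceil(bytes / UNITS_PER_WORD) argument words. The idiom in isolation (UNITS_PER_WORD hard-coded to the ix86 value for the sketch):

    #include <stdio.h>

    #define UNITS_PER_WORD 4  /* bytes per word on ix86 */

    /* Round BYTES up to whole argument words, as function_arg_advance does. */
    static int size_in_words (int bytes)
    {
      return (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
    }

    int main (void)
    {
      printf ("%d %d %d\n", size_in_words (1), size_in_words (4), size_in_words (9));
      /* prints: 1 1 3 */
      return 0;
    }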
@@ -527,12 +763,14 @@ function_arg (cum, mode, type, named)
int named; /* != 0 for normal args, == 0 for ... args */
{
rtx ret = NULL_RTX;
- int bytes = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
+ int bytes
+ = (mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
int words = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
switch (mode)
{
- default: /* for now, pass fp/complex values on the stack */
+ /* For now, pass fp/complex values on the stack. */
+ default:
break;
case BLKmode:
@@ -541,14 +779,14 @@ function_arg (cum, mode, type, named)
case HImode:
case QImode:
if (words <= cum->nregs)
- ret = gen_rtx (REG, mode, cum->regno);
+ ret = gen_rtx_REG (mode, cum->regno);
break;
}
if (TARGET_DEBUG_ARG)
{
fprintf (stderr,
- "function_arg( size=%d, words=%2d, nregs=%d, mode=%4s, named=%d",
+ "function_arg (size=%d, wds=%2d, nregs=%d, mode=%4s, named=%d",
words, cum->words, cum->nregs, GET_MODE_NAME (mode), named);
if (ret)
@@ -568,14 +806,13 @@ function_arg (cum, mode, type, named)
int
function_arg_partial_nregs (cum, mode, type, named)
- CUMULATIVE_ARGS *cum; /* current arg information */
- enum machine_mode mode; /* current arg mode */
- tree type; /* type of the argument or 0 if lib support */
- int named; /* != 0 for normal args, == 0 for ... args */
+ CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED; /* current arg information */
+ enum machine_mode mode ATTRIBUTE_UNUSED; /* current arg mode */
+ tree type ATTRIBUTE_UNUSED; /* type of the argument or 0 if lib support */
+ int named ATTRIBUTE_UNUSED; /* != 0 for normal args, == 0 for ... args */
{
return 0;
}
-
/* Output an insn whose source is a 386 integer register. SRC is the
rtx for the register, and TEMPLATE is the op-code template. SRC may
@@ -609,18 +846,19 @@ output_op_from_reg (src, template)
if (size > UNITS_PER_WORD)
{
rtx high;
+
if (size > 2 * UNITS_PER_WORD)
{
- high = gen_rtx (REG, SImode, REGNO (src) + 2);
+ high = gen_rtx_REG (SImode, REGNO (src) + 2);
output_asm_insn (AS1 (push%L0,%0), &high);
}
- high = gen_rtx (REG, SImode, REGNO (src) + 1);
+
+ high = gen_rtx_REG (SImode, REGNO (src) + 1);
output_asm_insn (AS1 (push%L0,%0), &high);
}
- output_asm_insn (AS1 (push%L0,%0), &src);
+ output_asm_insn (AS1 (push%L0,%0), &src);
output_asm_insn (template, xops);
-
output_asm_insn (AS2 (add%L3,%2,%3), xops);
}
@@ -630,27 +868,45 @@ output_op_from_reg (src, template)
otherwise a `fst' float store is done. */
void
-output_to_reg (dest, dies)
+output_to_reg (dest, dies, scratch_mem)
rtx dest;
int dies;
+ rtx scratch_mem;
{
rtx xops[4];
int size = GET_MODE_SIZE (GET_MODE (dest));
- xops[0] = AT_SP (Pmode);
+ if (! scratch_mem)
+ xops[0] = AT_SP (Pmode);
+ else
+ xops[0] = scratch_mem;
+
xops[1] = stack_pointer_rtx;
xops[2] = GEN_INT (size);
xops[3] = dest;
- output_asm_insn (AS2 (sub%L1,%2,%1), xops);
+ if (! scratch_mem)
+ output_asm_insn (AS2 (sub%L1,%2,%1), xops);
if (GET_MODE_CLASS (GET_MODE (dest)) == MODE_INT)
{
if (dies)
output_asm_insn (AS1 (fistp%z3,%y0), xops);
+ else if (GET_MODE (xops[3]) == DImode && ! dies)
+ {
+ /* There is no DImode version of this without a stack pop, so
+ we must emulate it. It doesn't matter much what the second
+ instruction is, because the value being pushed on the FP stack
+ is not used except for the following stack popping store.
+ This case can only happen without optimization, so it doesn't
+ matter that it is inefficient. */
+ output_asm_insn (AS1 (fistp%z3,%0), xops);
+ output_asm_insn (AS1 (fild%z3,%0), xops);
+ }
else
output_asm_insn (AS1 (fist%z3,%y0), xops);
}
+
else if (GET_MODE_CLASS (GET_MODE (dest)) == MODE_FLOAT)
{
if (dies)
@@ -666,19 +922,38 @@ output_to_reg (dest, dies)
output_asm_insn (AS1 (fst%z3,%y0), xops);
}
}
+
else
abort ();
- output_asm_insn (AS1 (pop%L0,%0), &dest);
+ if (! scratch_mem)
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+ else
+ output_asm_insn (AS2 (mov%L0,%0,%3), xops);
+
if (size > UNITS_PER_WORD)
{
- dest = gen_rtx (REG, SImode, REGNO (dest) + 1);
- output_asm_insn (AS1 (pop%L0,%0), &dest);
+ dest = gen_rtx_REG (SImode, REGNO (dest) + 1);
+ if (! scratch_mem)
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+ else
+ {
+ xops[0] = adj_offsettable_operand (xops[0], 4);
+ xops[3] = dest;
+ output_asm_insn (AS2 (mov%L0,%0,%3), xops);
+ }
+
if (size > 2 * UNITS_PER_WORD)
{
- dest = gen_rtx (REG, SImode, REGNO (dest) + 1);
- output_asm_insn (AS1 (pop%L0,%0), &dest);
+ dest = gen_rtx_REG (SImode, REGNO (dest) + 1);
+ if (! scratch_mem)
+ output_asm_insn (AS1 (pop%L0,%0), &dest);
+ else
+ {
+ xops[0] = adj_offsettable_operand (xops[0], 4);
+ output_asm_insn (AS2 (mov%L0,%0,%3), xops);
+ }
}
}
}
@@ -696,9 +971,7 @@ singlemove_string (operands)
return "push%L1 %1";
}
else if (GET_CODE (operands[1]) == CONST_DOUBLE)
- {
- return output_move_const_single (operands);
- }
+ return output_move_const_single (operands);
else if (GET_CODE (operands[0]) == REG || GET_CODE (operands[1]) == REG)
return AS2 (mov%L0,%1,%0);
else if (CONSTANT_P (operands[1]))
@@ -730,11 +1003,11 @@ find_addr_reg (addr)
else
abort ();
}
+
if (GET_CODE (addr) == REG)
return addr;
abort ();
}
-
/* Output an insn to add the constant N to the register X. */
@@ -750,7 +1023,7 @@ asm_add (n, x)
output_asm_insn (AS1 (dec%L0,%0), xops);
else if (n == 1)
output_asm_insn (AS1 (inc%L0,%0), xops);
- else if (n < 0)
+ else if (n < 0 || n == 128)
{
xops[1] = GEN_INT (-n);
output_asm_insn (AS2 (sub%L0,%1,%0), xops);
@@ -761,7 +1034,6 @@ asm_add (n, x)
output_asm_insn (AS2 (add%L0,%1,%0), xops);
}
}
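The new `|| n == 128` case in asm_add leans on the sign-extended 8-bit immediate form of the ALU instructions: +128 does not fit in a signed byte but -128 does, so `sub $-128,%reg` gets the short encoding that `add $128,%reg` cannot. A tiny sketch of the immediate-width rule (helper name is made up):

    #include <stdio.h>

    /* The short add/sub form sign-extends an 8-bit immediate, so it is
       usable only for values in [-128, 127].  Adding 128 therefore
       prefers "sub $-128", whose operand does fit.  */
    static int fits_imm8 (long n)
    {
      return n >= -128 && n <= 127;
    }

    int main (void)
    {
      printf ("add $128 fits imm8: %d\n", fits_imm8 (128));   /* 0 */
      printf ("sub $-128 fits imm8: %d\n", fits_imm8 (-128)); /* 1 */
      return 0;
    }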
-
/* Output assembler code to perform a doubleword move insn
with operands OPERANDS. */
@@ -828,11 +1100,11 @@ output_move_double (operands)
operands[0] = XEXP (XEXP (operands[0], 0), 0);
asm_add (-size, operands[0]);
if (GET_MODE (operands[1]) == XFmode)
- operands[0] = gen_rtx (MEM, XFmode, operands[0]);
+ operands[0] = gen_rtx_MEM (XFmode, operands[0]);
else if (GET_MODE (operands[0]) == DFmode)
- operands[0] = gen_rtx (MEM, DFmode, operands[0]);
+ operands[0] = gen_rtx_MEM (DFmode, operands[0]);
else
- operands[0] = gen_rtx (MEM, DImode, operands[0]);
+ operands[0] = gen_rtx_MEM (DImode, operands[0]);
optype0 = OFFSOP;
}
@@ -842,11 +1114,11 @@ output_move_double (operands)
operands[1] = XEXP (XEXP (operands[1], 0), 0);
asm_add (-size, operands[1]);
if (GET_MODE (operands[1]) == XFmode)
- operands[1] = gen_rtx (MEM, XFmode, operands[1]);
+ operands[1] = gen_rtx_MEM (XFmode, operands[1]);
else if (GET_MODE (operands[1]) == DFmode)
- operands[1] = gen_rtx (MEM, DFmode, operands[1]);
+ operands[1] = gen_rtx_MEM (DFmode, operands[1]);
else
- operands[1] = gen_rtx (MEM, DImode, operands[1]);
+ operands[1] = gen_rtx_MEM (DImode, operands[1]);
optype1 = OFFSOP;
}
@@ -872,8 +1144,8 @@ output_move_double (operands)
{
if (optype0 == REGOP)
{
- middlehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
- latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 2);
+ middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
}
else if (optype0 == OFFSOP)
{
@@ -885,11 +1157,11 @@ output_move_double (operands)
middlehalf[0] = operands[0];
latehalf[0] = operands[0];
}
-
+
if (optype1 == REGOP)
{
- middlehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
- latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 2);
+ middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
}
else if (optype1 == OFFSOP)
{
@@ -918,17 +1190,20 @@ output_move_double (operands)
latehalf[1] = operands[1];
}
}
- else /* size is not 12: */
+
+ else
{
+ /* Size is not 12. */
+
if (optype0 == REGOP)
- latehalf[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
else if (optype0 == OFFSOP)
latehalf[0] = adj_offsettable_operand (operands[0], 4);
else
latehalf[0] = operands[0];
if (optype1 == REGOP)
- latehalf[1] = gen_rtx (REG, SImode, REGNO (operands[1]) + 1);
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else if (optype1 == OFFSOP)
latehalf[1] = adj_offsettable_operand (operands[1], 4);
else if (optype1 == CNSTOP)
@@ -960,34 +1235,34 @@ output_move_double (operands)
{
/* If both halves of dest are used in the src memory address,
compute the address into latehalf of dest. */
-compadr:
+ compadr:
xops[0] = latehalf[0];
xops[1] = XEXP (operands[1], 0);
output_asm_insn (AS2 (lea%L0,%a1,%0), xops);
- if( GET_MODE (operands[1]) == XFmode )
+ if (GET_MODE (operands[1]) == XFmode)
{
-/* abort (); */
- operands[1] = gen_rtx (MEM, XFmode, latehalf[0]);
+ operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
middlehalf[1] = adj_offsettable_operand (operands[1], size-8);
latehalf[1] = adj_offsettable_operand (operands[1], size-4);
}
else
{
- operands[1] = gen_rtx (MEM, DImode, latehalf[0]);
+ operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
latehalf[1] = adj_offsettable_operand (operands[1], size-4);
}
}
+
else if (size == 12
&& reg_mentioned_p (middlehalf[0], XEXP (operands[1], 0)))
{
/* Check for two regs used by both source and dest. */
if (reg_mentioned_p (operands[0], XEXP (operands[1], 0))
|| reg_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
- goto compadr;
+ goto compadr;
/* JRV says this can't happen: */
if (addreg0 || addreg1)
- abort();
+ abort ();
/* Only the middle reg conflicts; simply put it last. */
output_asm_insn (singlemove_string (operands), operands);
@@ -995,6 +1270,7 @@ compadr:
output_asm_insn (singlemove_string (middlehalf), middlehalf);
return "";
}
+
else if (reg_mentioned_p (operands[0], XEXP (operands[1], 0)))
/* If the low half of dest is mentioned in the source memory
address, then arrange to emit the move of the late half first. */
@@ -1009,12 +1285,13 @@ compadr:
such overlap can't happen in memory unless the user explicitly
sets it up, and that is an undefined circumstance. */
-/*
+#if 0
if (optype0 == PUSHOP || optype1 == PUSHOP
|| (optype0 == REGOP && optype1 == REGOP
&& REGNO (operands[0]) == REGNO (latehalf[1]))
|| dest_overlapped_low)
-*/
+#endif
+
if (optype0 == PUSHOP || optype1 == PUSHOP
|| (optype0 == REGOP && optype1 == REGOP
&& ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
@@ -1032,17 +1309,17 @@ compadr:
/* Undo the adds we just did. */
if (addreg0)
- asm_add (-4, addreg0);
+ asm_add (-4, addreg0);
if (addreg1)
asm_add (-4, addreg1);
if (size == 12)
{
- output_asm_insn (singlemove_string (middlehalf), middlehalf);
- if (addreg0)
- asm_add (-4, addreg0);
- if (addreg1)
- asm_add (-4, addreg1);
+ output_asm_insn (singlemove_string (middlehalf), middlehalf);
+ if (addreg0)
+ asm_add (-4, addreg0);
+ if (addreg1)
+ asm_add (-4, addreg1);
}
/* Do low-numbered word. */
@@ -1081,7 +1358,6 @@ compadr:
return "";
}
-
#define MAX_TMPS 2 /* max temporary registers used */
@@ -1095,12 +1371,12 @@ output_move_pushmem (operands, insn, length, tmp_start, n_operands)
int tmp_start;
int n_operands;
{
-
- struct {
- char *load;
- char *push;
- rtx xops[2];
- } tmp_info[MAX_TMPS];
+ struct
+ {
+ char *load;
+ char *push;
+ rtx xops[2];
+ } tmp_info[MAX_TMPS];
rtx src = operands[1];
int max_tmps = 0;
@@ -1110,7 +1386,7 @@ output_move_pushmem (operands, insn, length, tmp_start, n_operands)
int i, num_tmps;
rtx xops[1];
- if (!offsettable_memref_p (src))
+ if (! offsettable_memref_p (src))
fatal_insn ("Source is not offsettable", insn);
if ((length & 3) != 0)
@@ -1146,7 +1422,8 @@ output_move_pushmem (operands, insn, length, tmp_start, n_operands)
{
tmp_info[num_tmps].load = AS2(mov%L0,%0,%1);
tmp_info[num_tmps].push = AS1(push%L0,%1);
- tmp_info[num_tmps].xops[0] = adj_offsettable_operand (src, offset + stack_offset);
+ tmp_info[num_tmps].xops[0]
+ = adj_offsettable_operand (src, offset + stack_offset);
offset -= 4;
}
@@ -1162,9 +1439,7 @@ output_move_pushmem (operands, insn, length, tmp_start, n_operands)
return "";
}
-
-
/* Output the appropriate code to move data between two memory locations */
char *
@@ -1175,11 +1450,12 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
int tmp_start;
int n_operands;
{
- struct {
- char *load;
- char *store;
- rtx xops[3];
- } tmp_info[MAX_TMPS];
+ struct
+ {
+ char *load;
+ char *store;
+ rtx xops[3];
+ } tmp_info[MAX_TMPS];
rtx dest = operands[0];
rtx src = operands[1];
@@ -1194,10 +1470,10 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
&& XEXP (XEXP (dest, 0), 0) == stack_pointer_rtx)
return output_move_pushmem (operands, insn, length, tmp_start, n_operands);
- if (!offsettable_memref_p (src))
+ if (! offsettable_memref_p (src))
fatal_insn ("Source is not offsettable", insn);
- if (!offsettable_memref_p (dest))
+ if (! offsettable_memref_p (dest))
fatal_insn ("Destination is not offsettable", insn);
/* Figure out which temporary registers we have available */
@@ -1205,7 +1481,7 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
{
if (GET_CODE (operands[i]) == REG)
{
- if ((length & 1) != 0 && !qi_tmp && QI_REG_P (operands[i]))
+ if ((length & 1) != 0 && qi_tmp == 0 && QI_REG_P (operands[i]))
qi_tmp = operands[i];
if (reg_overlap_mentioned_p (operands[i], dest))
@@ -1214,19 +1490,21 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
if (reg_overlap_mentioned_p (operands[i], src))
fatal_insn ("Temporary register overlaps the source", insn);
- tmp_info[ max_tmps++ ].xops[2] = operands[i];
+ tmp_info[max_tmps++].xops[2] = operands[i];
if (max_tmps == MAX_TMPS)
break;
}
}
if (max_tmps == 0)
- fatal_insn ("No scratch registers were found to do memory->memory moves", insn);
+ fatal_insn ("No scratch registers were found to do memory->memory moves",
+ insn);
if ((length & 1) != 0)
{
- if (!qi_tmp)
- fatal_insn ("No byte register found when moving odd # of bytes.", insn);
+ if (qi_tmp == 0)
+ fatal_insn ("No byte register found when moving odd # of bytes.",
+ insn);
}
while (length > 1)
@@ -1237,17 +1515,24 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
{
tmp_info[num_tmps].load = AS2(mov%L0,%1,%2);
tmp_info[num_tmps].store = AS2(mov%L0,%2,%0);
- tmp_info[num_tmps].xops[0] = adj_offsettable_operand (dest, offset);
- tmp_info[num_tmps].xops[1] = adj_offsettable_operand (src, offset);
+ tmp_info[num_tmps].xops[0]
+ = adj_offsettable_operand (dest, offset);
+ tmp_info[num_tmps].xops[1]
+ = adj_offsettable_operand (src, offset);
+
offset += 4;
length -= 4;
}
+
else if (length >= 2)
{
tmp_info[num_tmps].load = AS2(mov%W0,%1,%2);
tmp_info[num_tmps].store = AS2(mov%W0,%2,%0);
- tmp_info[num_tmps].xops[0] = adj_offsettable_operand (dest, offset);
- tmp_info[num_tmps].xops[1] = adj_offsettable_operand (src, offset);
+ tmp_info[num_tmps].xops[0]
+ = adj_offsettable_operand (dest, offset);
+ tmp_info[num_tmps].xops[1]
+ = adj_offsettable_operand (src, offset);
+
offset += 2;
length -= 2;
}
@@ -1273,7 +1558,6 @@ output_move_memory (operands, insn, length, tmp_start, n_operands)
return "";
}
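output_move_memory streams the block through the available scratch registers four bytes at a time, then moves a 2-byte tail, and (per the qi_tmp check above) sends an odd trailing byte through a byte-capable register. The same chunking order in plain C, as a sketch rather than the emitted assembly:

    #include <stdio.h>
    #include <string.h>

    /* Copy LENGTH bytes in 4-, then 2-, then 1-byte chunks, the order in
       which output_move_memory schedules its mov instructions.  memcpy
       stands in for each load/store pair through a scratch register.  */
    static void chunked_copy (char *dst, const char *src, int length)
    {
      while (length >= 4) { memcpy (dst, src, 4); dst += 4; src += 4; length -= 4; }
      if (length >= 2)    { memcpy (dst, src, 2); dst += 2; src += 2; length -= 2; }
      if (length == 1)    *dst = *src;  /* the byte that needs a QI-capable register */
    }

    int main (void)
    {
      char out[16] = { 0 };
      chunked_copy (out, "eleven bytes", 11);
      printf ("%s\n", out);  /* prints: eleven byte */
      return 0;
    }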
-
int
standard_80387_constant_p (x)
@@ -1289,7 +1573,7 @@ standard_80387_constant_p (x)
set_float_handler (handler);
REAL_VALUE_FROM_CONST_DOUBLE (d, x);
- is0 = REAL_VALUES_EQUAL (d, dconst0);
+ is0 = REAL_VALUES_EQUAL (d, dconst0) && !REAL_VALUE_MINUS_ZERO (d);
is1 = REAL_VALUES_EQUAL (d, dconst1);
set_float_handler (NULL_PTR);
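The added REAL_VALUE_MINUS_ZERO test keeps -0.0 from being treated as a standard constant: `fldz` loads +0.0, and the two zeros compare equal even though their sign bits differ, so an equality check alone cannot reject the negative zero. A quick standalone demonstration of that IEEE wrinkle:

    #include <stdio.h>
    #include <math.h>

    int main (void)
    {
      double pz = 0.0, nz = -0.0;

      /* Equal under ==, distinguishable only by the sign bit -- the hole
         the extra check in standard_80387_constant_p closes.  */
      printf ("pz == nz     : %d\n", pz == nz);        /* 1 */
      printf ("signbit (pz) : %d\n", !!signbit (pz));  /* 0 */
      printf ("signbit (nz) : %d\n", !!signbit (nz));  /* 1 */
      return 0;
    }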
@@ -1321,6 +1605,7 @@ output_move_const_single (operands)
if (conval == 2)
return "fld1";
}
+
if (GET_CODE (operands[1]) == CONST_DOUBLE)
{
REAL_VALUE_TYPE r; long l;
@@ -1332,6 +1617,7 @@ output_move_const_single (operands)
REAL_VALUE_TO_TARGET_SINGLE (r, l);
operands[1] = GEN_INT (l);
}
+
return singlemove_string (operands);
}
@@ -1341,18 +1627,20 @@ output_move_const_single (operands)
int
symbolic_operand (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
switch (GET_CODE (op))
{
case SYMBOL_REF:
case LABEL_REF:
return 1;
+
case CONST:
op = XEXP (op, 0);
return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
|| GET_CODE (XEXP (op, 0)) == LABEL_REF)
&& GET_CODE (XEXP (op, 1)) == CONST_INT);
+
default:
return 0;
}
@@ -1366,7 +1654,7 @@ symbolic_operand (op, mode)
int
call_insn_operand (op, mode)
rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
if (GET_CODE (op) == MEM
&& ((CONSTANT_ADDRESS_P (XEXP (op, 0))
@@ -1374,9 +1662,10 @@ call_insn_operand (op, mode)
&& general_operand (XEXP (op, 0), Pmode))
|| (GET_CODE (XEXP (op, 0)) == REG
&& XEXP (op, 0) != arg_pointer_rtx
- && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
- && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
+ && ! (REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
+ && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
return 1;
+
return 0;
}
@@ -1386,15 +1675,16 @@ call_insn_operand (op, mode)
int
expander_call_insn_operand (op, mode)
rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
if (GET_CODE (op) == MEM
&& (CONSTANT_ADDRESS_P (XEXP (op, 0))
|| (GET_CODE (XEXP (op, 0)) == REG
&& XEXP (op, 0) != arg_pointer_rtx
- && !(REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
- && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
+ && ! (REGNO (XEXP (op, 0)) >= FIRST_PSEUDO_REGISTER
+ && REGNO (XEXP (op, 0)) <= LAST_VIRTUAL_REGISTER))))
return 1;
+
return 0;
}
@@ -1410,12 +1700,22 @@ arithmetic_comparison_operator (op, mode)
if (mode != VOIDmode && mode != GET_MODE (op))
return 0;
+
code = GET_CODE (op);
if (GET_RTX_CLASS (code) != '<')
return 0;
return (code != GT && code != LE);
}
+
+int
+ix86_logical_operator (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return GET_CODE (op) == AND || GET_CODE (op) == IOR || GET_CODE (op) == XOR;
+}
+
/* Returns 1 if OP contains a symbol reference */
@@ -1440,6 +1740,7 @@ symbolic_reference_mentioned_p (op)
if (symbolic_reference_mentioned_p (XVECEXP (op, i, j)))
return 1;
}
+
else if (fmt[i] == 'e' && symbolic_reference_mentioned_p (XEXP (op, i)))
return 1;
}
@@ -1447,32 +1748,390 @@ symbolic_reference_mentioned_p (op)
return 0;
}
-/* This function generates the assembly code for function entry.
+/* Attempt to expand a binary operator. Make the expansion closer to the
+ actual machine than just general_operand, which will allow 3 separate
+ memory references (one output, two input) in a single insn. Return
+ whether the insn fails, or succeeds. */
+
+int
+ix86_expand_binary_operator (code, mode, operands)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx operands[];
+{
+ int modified;
+
+ /* Recognize <var1> = <value> <op> <var1> for commutative operators */
+ if (GET_RTX_CLASS (code) == 'c'
+ && (rtx_equal_p (operands[0], operands[2])
+ || immediate_operand (operands[1], mode)))
+ {
+ rtx temp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = temp;
+ }
+
+ /* If optimizing, copy to regs to improve CSE */
+ if (TARGET_PSEUDO && optimize
+ && ((reload_in_progress | reload_completed) == 0))
+ {
+ if (GET_CODE (operands[1]) == MEM
+ && ! rtx_equal_p (operands[0], operands[1]))
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+
+ if (GET_CODE (operands[2]) == MEM)
+ operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
+
+ if (GET_CODE (operands[1]) == CONST_INT && code == MINUS)
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (operands[0]));
+
+ emit_move_insn (temp, operands[1]);
+ operands[1] = temp;
+ return TRUE;
+ }
+ }
+
+ if (!ix86_binary_operator_ok (code, mode, operands))
+ {
+ /* If not optimizing, try to make a valid insn (optimize code
+ previously did this above to improve chances of CSE) */
+
+ if ((! TARGET_PSEUDO || !optimize)
+ && ((reload_in_progress | reload_completed) == 0)
+ && (GET_CODE (operands[1]) == MEM || GET_CODE (operands[2]) == MEM))
+ {
+ modified = FALSE;
+ if (GET_CODE (operands[1]) == MEM
+ && ! rtx_equal_p (operands[0], operands[1]))
+ {
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+ modified = TRUE;
+ }
+
+ if (GET_CODE (operands[2]) == MEM)
+ {
+ operands[2] = force_reg (GET_MODE (operands[2]), operands[2]);
+ modified = TRUE;
+ }
+
+ if (GET_CODE (operands[1]) == CONST_INT && code == MINUS)
+ {
+ rtx temp = gen_reg_rtx (GET_MODE (operands[0]));
+
+ emit_move_insn (temp, operands[1]);
+ operands[1] = temp;
+ return TRUE;
+ }
+
+ if (modified && ! ix86_binary_operator_ok (code, mode, operands))
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/* Return TRUE or FALSE depending on whether the binary operator meets the
+ appropriate constraints. */
+
+int
+ix86_binary_operator_ok (code, mode, operands)
+ enum rtx_code code;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx operands[3];
+{
+ return (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != MEM)
+ && (GET_CODE (operands[1]) != CONST_INT || GET_RTX_CLASS (code) == 'c');
+}
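ix86_binary_operator_ok captures two i386 constraints: a two-operand ALU instruction cannot take both sources from memory, and a constant may sit in the first source slot only when the operation is commutative (the expander above swaps it into the second slot in that case). The predicate restated with plain flags; the names are assumptions of this sketch:

    #include <stdio.h>

    /* Mirrors ix86_binary_operator_ok: reject two memory sources, and
       reject a constant first source unless the operator is commutative.  */
    static int binary_op_ok (int src1_is_mem, int src2_is_mem,
                             int src1_is_const, int commutative)
    {
      return (!src1_is_mem || !src2_is_mem)
             && (!src1_is_const || commutative);
    }

    int main (void)
    {
      printf ("%d\n", binary_op_ok (1, 1, 0, 0)); /* 0: mem op mem */
      printf ("%d\n", binary_op_ok (1, 0, 0, 0)); /* 1: mem op reg */
      printf ("%d\n", binary_op_ok (0, 0, 1, 0)); /* 0: const - reg */
      printf ("%d\n", binary_op_ok (0, 0, 1, 1)); /* 1: const + reg */
      return 0;
    }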
+
+/* Attempt to expand a unary operator. Make the expansion closer to the
+ actual machine than just general_operand, which will allow 2 separate
+ memory references (one output, one input) in a single insn. Return
+ whether the insn fails, or succeeds. */
+
+int
+ix86_expand_unary_operator (code, mode, operands)
+ enum rtx_code code;
+ enum machine_mode mode;
+ rtx operands[];
+{
+ /* If optimizing, copy to regs to improve CSE */
+ if (TARGET_PSEUDO
+ && optimize
+ && ((reload_in_progress | reload_completed) == 0)
+ && GET_CODE (operands[1]) == MEM)
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+
+ if (! ix86_unary_operator_ok (code, mode, operands))
+ {
+ if ((! TARGET_PSEUDO || optimize == 0)
+ && ((reload_in_progress | reload_completed) == 0)
+ && GET_CODE (operands[1]) == MEM)
+ {
+ operands[1] = force_reg (GET_MODE (operands[1]), operands[1]);
+ if (! ix86_unary_operator_ok (code, mode, operands))
+ return FALSE;
+ }
+ else
+ return FALSE;
+ }
+
+ return TRUE;
+}
+
+/* Return TRUE or FALSE depending on whether the unary operator meets the
+ appropriate constraints. */
+
+int
+ix86_unary_operator_ok (code, mode, operands)
+ enum rtx_code code ATTRIBUTE_UNUSED;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx operands[2] ATTRIBUTE_UNUSED;
+{
+ return TRUE;
+}
+
+static rtx pic_label_rtx;
+static char pic_label_name [256];
+static int pic_label_no = 0;
+
+/* This function generates code for -fpic that loads %ebx with
+ the return address of the caller and then returns. */
+
+void
+asm_output_function_prefix (file, name)
+ FILE *file;
+ char *name ATTRIBUTE_UNUSED;
+{
+ rtx xops[2];
+ int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
+ || current_function_uses_const_pool);
+ xops[0] = pic_offset_table_rtx;
+ xops[1] = stack_pointer_rtx;
+
+ /* Deep branch prediction favors having a return for every call. */
+ if (pic_reg_used && TARGET_DEEP_BRANCH_PREDICTION)
+ {
+ tree prologue_node;
+
+ if (pic_label_rtx == 0)
+ {
+ pic_label_rtx = gen_label_rtx ();
+ ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", pic_label_no++);
+ LABEL_NAME (pic_label_rtx) = pic_label_name;
+ }
+
+ prologue_node = make_node (FUNCTION_DECL);
+ DECL_RESULT (prologue_node) = 0;
+
+ /* This used to call ASM_DECLARE_FUNCTION_NAME() but since it's an
+ internal (non-global) label that's being emitted, it didn't make
+ sense to have .type information for local labels. This caused
+ the SCO OpenServer 5.0.4 ELF assembler grief (why are you giving
+ me debug info for a label that you're declaring non-global?), so this
+ was changed to call ASM_OUTPUT_LABEL() instead. */
+
+
+ ASM_OUTPUT_LABEL (file, pic_label_name);
+ output_asm_insn ("movl (%1),%0", xops);
+ output_asm_insn ("ret", xops);
+ }
+}
+
+/* Generate the assembly code for function entry.
FILE is an stdio stream to output the code to.
SIZE is an int: how many units of temporary storage to allocate. */
void
function_prologue (file, size)
- FILE *file;
- int size;
+ FILE *file ATTRIBUTE_UNUSED;
+ int size ATTRIBUTE_UNUSED;
+{
+ if (TARGET_SCHEDULE_PROLOGUE)
+ {
+ pic_label_rtx = 0;
+ return;
+ }
+
+ ix86_prologue (0);
+}
+
+/* Expand the prologue into a bunch of separate insns. */
+
+void
+ix86_expand_prologue ()
+{
+ if (! TARGET_SCHEDULE_PROLOGUE)
+ return;
+
+ ix86_prologue (1);
+}
+
+void
+load_pic_register (do_rtl)
+ int do_rtl;
+{
+ rtx xops[4];
+
+ if (TARGET_DEEP_BRANCH_PREDICTION)
+ {
+ xops[0] = pic_offset_table_rtx;
+ if (pic_label_rtx == 0)
+ {
+ pic_label_rtx = gen_label_rtx ();
+ ASM_GENERATE_INTERNAL_LABEL (pic_label_name, "LPR", pic_label_no++);
+ LABEL_NAME (pic_label_rtx) = pic_label_name;
+ }
+
+ xops[1] = gen_rtx_MEM (QImode,
+ gen_rtx (SYMBOL_REF, Pmode,
+ LABEL_NAME (pic_label_rtx)));
+
+ if (do_rtl)
+ {
+ emit_insn (gen_prologue_get_pc (xops[0], xops[1]));
+ emit_insn (gen_prologue_set_got (xops[0],
+ gen_rtx (SYMBOL_REF, Pmode,
+ "$_GLOBAL_OFFSET_TABLE_"),
+ xops[1]));
+ }
+ else
+ {
+ output_asm_insn (AS1 (call,%X1), xops);
+ output_asm_insn ("addl $_GLOBAL_OFFSET_TABLE_,%0", xops);
+ pic_label_rtx = 0;
+ }
+ }
+
+ else
+ {
+ xops[0] = pic_offset_table_rtx;
+ xops[1] = gen_label_rtx ();
+
+ if (do_rtl)
+ {
+ /* We can't put a raw CODE_LABEL into the RTL, and we can't emit
+ a new CODE_LABEL after reload, so we need a single pattern to
+ emit the 3 necessary instructions. */
+ emit_insn (gen_prologue_get_pc_and_set_got (xops[0]));
+ }
+ else
+ {
+ output_asm_insn (AS1 (call,%P1), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[1]));
+ output_asm_insn (AS1 (pop%L0,%0), xops);
+ output_asm_insn ("addl $_GLOBAL_OFFSET_TABLE_+[.-%P1],%0", xops);
+ }
+ }
+
+ /* When -fpic, we must emit a scheduling barrier, so that the instruction
+ that restores %ebx (which is PIC_OFFSET_TABLE_REGNUM), does not get
+ moved before any instruction which implicitly uses the got. */
+
+ if (do_rtl)
+ emit_insn (gen_blockage ());
+}
+
+static void
+ix86_prologue (do_rtl)
+ int do_rtl;
{
register int regno;
int limit;
rtx xops[4];
int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
|| current_function_uses_const_pool);
+ long tsize = get_frame_size ();
+ rtx insn;
+ int cfa_offset = INCOMING_FRAME_SP_OFFSET, cfa_store_offset = cfa_offset;
xops[0] = stack_pointer_rtx;
xops[1] = frame_pointer_rtx;
- xops[2] = GEN_INT (size);
+ xops[2] = GEN_INT (tsize);
+
if (frame_pointer_needed)
{
- output_asm_insn ("push%L1 %1", xops);
- output_asm_insn (AS2 (mov%L0,%0,%1), xops);
+ if (do_rtl)
+ {
+ insn = emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx (PRE_DEC, SImode,
+ stack_pointer_rtx)),
+ frame_pointer_rtx));
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_move_insn (xops[1], xops[0]);
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+
+ else
+ {
+ output_asm_insn ("push%L1 %1", xops);
+#ifdef INCOMING_RETURN_ADDR_RTX
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+
+ cfa_store_offset += 4;
+ cfa_offset = cfa_store_offset;
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
+ dwarf2out_reg_save (l, FRAME_POINTER_REGNUM, - cfa_store_offset);
+ }
+#endif
+
+ output_asm_insn (AS2 (mov%L0,%0,%1), xops);
+#ifdef INCOMING_RETURN_ADDR_RTX
+ if (dwarf2out_do_frame ())
+ dwarf2out_def_cfa ("", FRAME_POINTER_REGNUM, cfa_offset);
+#endif
+ }
+ }
+
+ if (tsize == 0)
+ ;
+ else if (! TARGET_STACK_PROBE || tsize < CHECK_STACK_LIMIT)
+ {
+ if (do_rtl)
+ {
+ insn = emit_insn (gen_prologue_set_stack_ptr (xops[2]));
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ {
+ output_asm_insn (AS2 (sub%L0,%2,%0), xops);
+#ifdef INCOMING_RETURN_ADDR_RTX
+ if (dwarf2out_do_frame ())
+ {
+ cfa_store_offset += tsize;
+ if (! frame_pointer_needed)
+ {
+ cfa_offset = cfa_store_offset;
+ dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, cfa_offset);
+ }
+ }
+#endif
+ }
}
+ else
+ {
+ xops[3] = gen_rtx_REG (SImode, 0);
+ if (do_rtl)
+ emit_move_insn (xops[3], xops[2]);
+ else
+ output_asm_insn (AS2 (mov%L0,%2,%3), xops);
- if (size)
- output_asm_insn (AS2 (sub%L0,%2,%0), xops);
+ xops[3] = gen_rtx_MEM (FUNCTION_MODE,
+ gen_rtx (SYMBOL_REF, Pmode, "_alloca"));
+
+ if (do_rtl)
+ emit_call_insn (gen_rtx (CALL, VOIDmode, xops[3], const0_rtx));
+ else
+ output_asm_insn (AS1 (call,%P3), xops);
+ }
/* Note: if `enter' is used, the args are NOT reversed;
this one is not reversed from Intel's operand order!!
@@ -1483,39 +2142,69 @@ function_prologue (file, size)
output_asm_insn ("enter %2,%3", xops);
}
*/
+
limit = (frame_pointer_needed ? FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
for (regno = limit - 1; regno >= 0; regno--)
if ((regs_ever_live[regno] && ! call_used_regs[regno])
|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
{
- xops[0] = gen_rtx (REG, SImode, regno);
- output_asm_insn ("push%L0 %0", xops);
+ xops[0] = gen_rtx_REG (SImode, regno);
+ if (do_rtl)
+ {
+ insn = emit_insn (gen_rtx (SET, VOIDmode,
+ gen_rtx_MEM (SImode,
+ gen_rtx (PRE_DEC, SImode,
+ stack_pointer_rtx)),
+ xops[0]));
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+ else
+ {
+ output_asm_insn ("push%L0 %0", xops);
+#ifdef INCOMING_RETURN_ADDR_RTX
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+
+ cfa_store_offset += 4;
+ if (! frame_pointer_needed)
+ {
+ cfa_offset = cfa_store_offset;
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, cfa_offset);
+ }
+
+ dwarf2out_reg_save (l, regno, - cfa_store_offset);
+ }
+#endif
+ }
}
if (pic_reg_used)
- {
- xops[0] = pic_offset_table_rtx;
- xops[1] = (rtx) gen_label_rtx ();
-
- output_asm_insn (AS1 (call,%P1), xops);
- ASM_OUTPUT_INTERNAL_LABEL (file, "L", CODE_LABEL_NUMBER (xops[1]));
- output_asm_insn (AS1 (pop%L0,%0), xops);
- output_asm_insn ("addl $_GLOBAL_OFFSET_TABLE_+[.-%P1],%0", xops);
- }
+ load_pic_register (do_rtl);
+
+ /* If we are profiling, make sure no instructions are scheduled before
+ the call to mcount. However, if -fpic, the above call will have
+ done that. */
+ if ((profile_flag || profile_block_flag)
+ && ! pic_reg_used && do_rtl)
+ emit_insn (gen_blockage ());
}
/* Return 1 if it is appropriate to emit `ret' instructions in the
body of a function. Do this only if the epilogue is simple, needing a
couple of insns. Prior to reloading, we can't tell how many registers
- must be saved, so return 0 then.
+ must be saved, so return 0 then. Return 0 if there is no frame
+ marker to de-allocate.
If NON_SAVING_SETJMP is defined and true, then it is not possible
for the epilogue to be simple, so return 0. This is a special case
- since NON_SAVING_SETJMP will not cause regs_ever_live to change until
- final, but jump_optimize may need to know sooner if a `return' is OK. */
+ since NON_SAVING_SETJMP will not cause regs_ever_live to change
+ until final, but jump_optimize may need to know sooner if a
+ `return' is OK. */
int
-simple_386_epilogue ()
+ix86_can_use_return_insn_p ()
{
int regno;
int nregs = 0;
@@ -1540,15 +2229,29 @@ simple_386_epilogue ()
return nregs == 0 || ! frame_pointer_needed;
}
-
/* This function generates the assembly code for function exit.
FILE is an stdio stream to output the code to.
SIZE is an int: how many units of temporary storage to deallocate. */
void
function_epilogue (file, size)
- FILE *file;
- int size;
+ FILE *file ATTRIBUTE_UNUSED;
+ int size ATTRIBUTE_UNUSED;
+{
+ return;
+}
+
+/* Restore function stack, frame, and registers. */
+
+void
+ix86_expand_epilogue ()
+{
+ ix86_epilogue (1);
+}
+
+static void
+ix86_epilogue (do_rtl)
+ int do_rtl;
{
register int regno;
register int nregs, limit;
@@ -1556,12 +2259,11 @@ function_epilogue (file, size)
rtx xops[3];
int pic_reg_used = flag_pic && (current_function_uses_pic_offset_table
|| current_function_uses_const_pool);
+ long tsize = get_frame_size ();
/* Compute the number of registers to pop */
- limit = (frame_pointer_needed
- ? FRAME_POINTER_REGNUM
- : STACK_POINTER_REGNUM);
+ limit = (frame_pointer_needed ? FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM);
nregs = 0;
@@ -1570,67 +2272,159 @@ function_epilogue (file, size)
|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
nregs++;
- /* sp is often unreliable so we must go off the frame pointer,
- */
+ /* sp is often unreliable so we must go off the frame pointer.
- /* In reality, we may not care if sp is unreliable, because we can
- restore the register relative to the frame pointer. In theory,
- since each move is the same speed as a pop, and we don't need the
- leal, this is faster. For now restore multiple registers the old
- way. */
+ In reality, we may not care if sp is unreliable, because we can restore
+ the register relative to the frame pointer. In theory, since each move
+ is the same speed as a pop, and we don't need the leal, this is faster.
+ For now restore multiple registers the old way. */
- offset = -size - (nregs * UNITS_PER_WORD);
+ offset = - tsize - (nregs * UNITS_PER_WORD);
xops[2] = stack_pointer_rtx;
+ /* When -fpic, we must emit a scheduling barrier, so that the instruction
+ that restores %ebx (which is PIC_OFFSET_TABLE_REGNUM), does not get
+ moved before any instruction which implicitly uses the got. This
+ includes any instruction which uses a SYMBOL_REF or a LABEL_REF.
+
+ Alternatively, this could be fixed by making the dependence on the
+ PIC_OFFSET_TABLE_REGNUM explicit in the RTL. */
+
+ if (flag_pic || profile_flag || profile_block_flag)
+ emit_insn (gen_blockage ());
+
if (nregs > 1 || ! frame_pointer_needed)
{
if (frame_pointer_needed)
{
- xops[0] = adj_offsettable_operand (AT_BP (Pmode), offset);
- output_asm_insn (AS2 (lea%L2,%0,%2), xops);
+ xops[0] = adj_offsettable_operand (AT_BP (QImode), offset);
+ if (do_rtl)
+ emit_insn (gen_movsi_lea (xops[2], XEXP (xops[0], 0)));
+ else
+ output_asm_insn (AS2 (lea%L2,%0,%2), xops);
}
for (regno = 0; regno < limit; regno++)
if ((regs_ever_live[regno] && ! call_used_regs[regno])
|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
{
- xops[0] = gen_rtx (REG, SImode, regno);
- output_asm_insn ("pop%L0 %0", xops);
+ xops[0] = gen_rtx_REG (SImode, regno);
+
+ if (do_rtl)
+ emit_insn (gen_pop (xops[0]));
+ else
+ output_asm_insn ("pop%L0 %0", xops);
}
}
+
else
for (regno = 0; regno < limit; regno++)
if ((regs_ever_live[regno] && ! call_used_regs[regno])
|| (regno == PIC_OFFSET_TABLE_REGNUM && pic_reg_used))
{
- xops[0] = gen_rtx (REG, SImode, regno);
+ xops[0] = gen_rtx_REG (SImode, regno);
xops[1] = adj_offsettable_operand (AT_BP (Pmode), offset);
- output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
+ if (do_rtl)
+ emit_move_insn (xops[0], xops[1]);
+ else
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
offset += 4;
}
if (frame_pointer_needed)
{
- /* On i486, mov & pop is faster than "leave". */
+ /* If not an i386, mov & pop is faster than "leave". */
- if (!TARGET_386)
+ if (TARGET_USE_LEAVE)
{
- xops[0] = frame_pointer_rtx;
- output_asm_insn (AS2 (mov%L2,%0,%2), xops);
- output_asm_insn ("pop%L0 %0", xops);
+ if (do_rtl)
+ emit_insn (gen_leave());
+ else
+ output_asm_insn ("leave", xops);
}
else
- output_asm_insn ("leave", xops);
+ {
+ xops[0] = frame_pointer_rtx;
+ xops[1] = stack_pointer_rtx;
+
+ if (do_rtl)
+ {
+ emit_insn (gen_epilogue_set_stack_ptr());
+ emit_insn (gen_pop (xops[0]));
+ }
+ else
+ {
+ output_asm_insn (AS2 (mov%L2,%0,%2), xops);
+ output_asm_insn ("pop%L0 %0", xops);
+ }
+ }
}
- else if (size)
+
+ else if (tsize)
{
- /* If there is no frame pointer, we must still release the frame. */
+ /* Intel's docs say that for 4 or 8 bytes of stack frame one should
+ use `pop' and not `add'. */
+ int use_pop = tsize == 4;
+
+ /* Use two pops only for the Pentium processors. */
+ if (tsize == 8 && !TARGET_386 && !TARGET_486)
+ {
+ rtx retval = current_function_return_rtx;
+
+ xops[1] = gen_rtx_REG (SImode, 1); /* %edx */
- xops[0] = GEN_INT (size);
- output_asm_insn (AS2 (add%L2,%0,%2), xops);
+ /* This case is a bit more complex. Since we cannot pop into
+ %ecx twice, we need a second register. But %edx is only
+ available if the return value is not of DImode, since a
+ DImode return value already occupies %edx. */
+ use_pop = (retval == NULL
+ || ! reg_overlap_mentioned_p (xops[1], retval));
+ }
+
+ if (use_pop)
+ {
+ xops[0] = gen_rtx_REG (SImode, 2); /* %ecx */
+
+ if (do_rtl)
+ {
+ /* We have to prevent the two pops here from being scheduled.
+ Otherwise GCC would, in some situations, try to put other
+ instructions in between them, which has a bad effect. */
+ emit_insn (gen_blockage ());
+ emit_insn (gen_pop (xops[0]));
+ if (tsize == 8)
+ emit_insn (gen_pop (xops[1]));
+ }
+ else
+ {
+ output_asm_insn ("pop%L0 %0", xops);
+ if (tsize == 8)
+ output_asm_insn ("pop%L1 %1", xops);
+ }
+ }
+ else
+ {
+ /* If there is no frame pointer, we must still release the frame. */
+ xops[0] = GEN_INT (tsize);
+
+ if (do_rtl)
+ emit_insn (gen_rtx (SET, VOIDmode, xops[2],
+ gen_rtx (PLUS, SImode, xops[2], xops[0])));
+ else
+ output_asm_insn (AS2 (add%L2,%0,%2), xops);
+ }
}
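This release sequence implements the Intel guidance quoted above: a 4-byte frame is dropped with a single pop into %ecx, an 8-byte frame with pops into %ecx and %edx on Pentium-class parts, the second pop is ruled out when a DImode return value already lives in %edx, and everything else falls back to `addl $tsize,%esp`. A compact restatement of that decision; the flag parameters are stand-ins for the rtx tests:

    #include <stdio.h>

    /* Decide whether the epilogue may release TSIZE frame bytes with pops
       into scratch registers instead of an add to %esp, following the
       constraints in ix86_epilogue.  */
    static int use_pop_release (int tsize, int is_386_or_486, int retval_uses_edx)
    {
      if (tsize == 4)
        return 1;                      /* one pop into %ecx */
      if (tsize == 8 && !is_386_or_486)
        return !retval_uses_edx;       /* second pop would need %edx */
      return 0;                        /* addl $tsize,%esp */
    }

    int main (void)
    {
      printf ("%d\n", use_pop_release (4, 0, 1));   /* 1 */
      printf ("%d\n", use_pop_release (8, 0, 0));   /* 1 */
      printf ("%d\n", use_pop_release (8, 0, 1));   /* 0: DImode return value */
      printf ("%d\n", use_pop_release (12, 0, 0));  /* 0 */
      return 0;
    }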
+#ifdef FUNCTION_BLOCK_PROFILER_EXIT
+ if (profile_block_flag == 2)
+ {
+ FUNCTION_BLOCK_PROFILER_EXIT(file);
+ }
+#endif
+
if (current_function_pops_args && current_function_args_size)
{
xops[1] = GEN_INT (current_function_pops_args);
@@ -1642,18 +2436,38 @@ function_epilogue (file, size)
if (current_function_pops_args >= 32768)
{
/* ??? Which register to use here? */
- xops[0] = gen_rtx (REG, SImode, 2);
- output_asm_insn ("pop%L0 %0", xops);
- output_asm_insn (AS2 (add%L2,%1,%2), xops);
- output_asm_insn ("jmp %*%0", xops);
+ xops[0] = gen_rtx_REG (SImode, 2);
+
+ if (do_rtl)
+ {
+ emit_insn (gen_pop (xops[0]));
+ emit_insn (gen_rtx (SET, VOIDmode, xops[2],
+ gen_rtx (PLUS, SImode, xops[1], xops[2])));
+ emit_jump_insn (xops[0]);
+ }
+ else
+ {
+ output_asm_insn ("pop%L0 %0", xops);
+ output_asm_insn (AS2 (add%L2,%1,%2), xops);
+ output_asm_insn ("jmp %*%0", xops);
+ }
}
else
- output_asm_insn ("ret %1", xops);
+ {
+ if (do_rtl)
+ emit_jump_insn (gen_return_pop_internal (xops[1]));
+ else
+ output_asm_insn ("ret %1", xops);
+ }
}
else
- output_asm_insn ("ret", xops);
+ {
+ if (do_rtl)
+ emit_jump_insn (gen_return_internal ());
+ else
+ output_asm_insn ("ret", xops);
+ }
}
-
/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
that is a valid memory address for an instruction.
@@ -1703,14 +2517,14 @@ legitimate_address_p (mode, addr, strict)
if (TARGET_DEBUG_ADDR)
{
fprintf (stderr,
- "\n==========\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
+ "\n======\nGO_IF_LEGITIMATE_ADDRESS, mode = %s, strict = %d\n",
GET_MODE_NAME (mode), strict);
debug_rtx (addr);
}
if (GET_CODE (addr) == REG || GET_CODE (addr) == SUBREG)
- base = addr; /* base reg */
+ base = addr;
else if (GET_CODE (addr) == PLUS)
{
@@ -1723,13 +2537,13 @@ legitimate_address_p (mode, addr, strict)
{
if (code1 == REG || code1 == SUBREG)
{
- indx = op0; /* index + base */
+ indx = op0; /* index + base */
base = op1;
}
else
{
- base = op0; /* base + displacement */
+ base = op0; /* base + displacement */
disp = op1;
}
}
@@ -1740,10 +2554,10 @@ legitimate_address_p (mode, addr, strict)
scale = XEXP (op0, 1);
if (code1 == REG || code1 == SUBREG)
- base = op1; /* index*scale + base */
+ base = op1; /* index*scale + base */
else
- disp = op1; /* index*scale + disp */
+ disp = op1; /* index*scale + disp */
}
else if (code0 == PLUS && GET_CODE (XEXP (op0, 0)) == MULT)
@@ -1756,7 +2570,7 @@ legitimate_address_p (mode, addr, strict)
else if (code0 == PLUS)
{
- indx = XEXP (op0, 0); /* index + base + disp */
+ indx = XEXP (op0, 0); /* index + base + disp */
base = XEXP (op0, 1);
disp = op1;
}
@@ -1770,12 +2584,12 @@ legitimate_address_p (mode, addr, strict)
else if (GET_CODE (addr) == MULT)
{
- indx = XEXP (addr, 0); /* index*scale */
+ indx = XEXP (addr, 0); /* index*scale */
scale = XEXP (addr, 1);
}
else
- disp = addr; /* displacement */
+ disp = addr; /* displacement */
/* Allow arg pointer and stack pointer as index if there is no scaling */
if (base && indx && !scale
@@ -1786,10 +2600,12 @@ legitimate_address_p (mode, addr, strict)
indx = tmp;
}
- /* Validate base register */
- /* Don't allow SUBREG's here, it can lead to spill failures when the base
+ /* Validate base register:
+
+ Don't allow SUBREG's here, it can lead to spill failures when the base
is one word out of a two word structure, which is represented internally
as a DImode int. */
+
if (base)
{
if (GET_CODE (base) != REG)
@@ -1798,16 +2614,17 @@ legitimate_address_p (mode, addr, strict)
return FALSE;
}
- if ((strict && !REG_OK_FOR_BASE_STRICT_P (base))
- || (!strict && !REG_OK_FOR_BASE_NONSTRICT_P (base)))
+ if ((strict && ! REG_OK_FOR_BASE_STRICT_P (base))
+ || (! strict && ! REG_OK_FOR_BASE_NONSTRICT_P (base)))
{
ADDR_INVALID ("Base is not valid.\n", base);
return FALSE;
}
}
- /* Validate index register */
- /* Don't allow SUBREG's here, it can lead to spill failures when the index
+ /* Validate index register:
+
+ Don't allow SUBREG's here, it can lead to spill failures when the index
is one word out of a two word structure, which is represented internally
as a DImode int. */
if (indx)
@@ -1818,17 +2635,17 @@ legitimate_address_p (mode, addr, strict)
return FALSE;
}
- if ((strict && !REG_OK_FOR_INDEX_STRICT_P (indx))
- || (!strict && !REG_OK_FOR_INDEX_NONSTRICT_P (indx)))
+ if ((strict && ! REG_OK_FOR_INDEX_STRICT_P (indx))
+ || (! strict && ! REG_OK_FOR_INDEX_NONSTRICT_P (indx)))
{
ADDR_INVALID ("Index is not valid.\n", indx);
return FALSE;
}
}
else if (scale)
- abort (); /* scale w/o index invalid */
+ abort (); /* scale w/o index invalid */
- /* Validate scale factor */
+ /* Validate scale factor: */
if (scale)
{
HOST_WIDE_INT value;
@@ -1847,32 +2664,44 @@ legitimate_address_p (mode, addr, strict)
}
}
- /* Validate displacement */
+ /* Validate displacement
+ Constant pool addresses must be handled special. They are
+ considered legitimate addresses, but only if not used with regs.
+ When printed, the output routines know to print the reference with the
+ PIC reg, even though the PIC reg doesn't appear in the RTL. */
if (disp)
{
- if (!CONSTANT_ADDRESS_P (disp))
+ if (GET_CODE (disp) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (disp)
+ && base == 0
+ && indx == 0)
+ ;
+
+ else if (!CONSTANT_ADDRESS_P (disp))
{
ADDR_INVALID ("Displacement is not valid.\n", disp);
return FALSE;
}
- if (GET_CODE (disp) == CONST_DOUBLE)
+ else if (GET_CODE (disp) == CONST_DOUBLE)
{
ADDR_INVALID ("Displacement is a const_double.\n", disp);
return FALSE;
}
- if (flag_pic && SYMBOLIC_CONST (disp) && base != pic_offset_table_rtx
- && (indx != pic_offset_table_rtx || scale != NULL_RTX))
+ else if (flag_pic && SYMBOLIC_CONST (disp)
+ && base != pic_offset_table_rtx
+ && (indx != pic_offset_table_rtx || scale != NULL_RTX))
{
ADDR_INVALID ("Displacement is an invalid pic reference.\n", disp);
return FALSE;
}
- if (HALF_PIC_P () && HALF_PIC_ADDRESS_P (disp)
- && (base != NULL_RTX || indx != NULL_RTX))
+ else if (HALF_PIC_P () && HALF_PIC_ADDRESS_P (disp)
+ && (base != NULL_RTX || indx != NULL_RTX))
{
- ADDR_INVALID ("Displacement is an invalid half-pic reference.\n", disp);
+ ADDR_INVALID ("Displacement is an invalid half-pic reference.\n",
+ disp);
return FALSE;
}
}
@@ -1883,7 +2712,6 @@ legitimate_address_p (mode, addr, strict)
/* Everything looks valid, return true */
return TRUE;
}
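legitimate_address_p pulls an address apart into the i386 form base + index*scale + displacement and then checks each piece: base and index must be acceptable registers, a scale is only allowed together with an index, and the displacement must be an allowed constant (with the PIC and constant-pool special cases added above). For reference, the effective-address arithmetic those checks protect, as a standalone sketch:

    #include <stdio.h>

    /* The i386 addressing form validated above:
       effective address = base + index * scale + displacement
       (scale is 1, 2, 4 or 8 on the i386).  */
    static unsigned long effective_address (unsigned long base,
                                            unsigned long index,
                                            int scale, long disp)
    {
      return base + index * (unsigned long) scale + (unsigned long) disp;
    }

    int main (void)
    {
      /* movl 12(%ebx,%esi,4),%eax -> base=%ebx, index=%esi, scale=4, disp=12 */
      unsigned long ebx = 0x1000, esi = 3;
      printf ("0x%lx\n", effective_address (ebx, esi, 4, 12)); /* 0x1018 */
      return 0;
    }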
-
/* Return a legitimate reference for ORIG (an address) using the
register REG. If REG is 0, a new pseudo is generated.
@@ -1933,15 +2761,15 @@ legitimize_pic_address (orig, reg)
|| GET_CODE (addr) == LABEL_REF)
new = gen_rtx (PLUS, Pmode, pic_offset_table_rtx, orig);
else
- new = gen_rtx (MEM, Pmode,
- gen_rtx (PLUS, Pmode,
- pic_offset_table_rtx, orig));
+ new = gen_rtx_MEM (Pmode,
+ gen_rtx (PLUS, Pmode, pic_offset_table_rtx, orig));
emit_move_insn (reg, new);
}
current_function_uses_pic_offset_table = 1;
return reg;
}
+
else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
{
rtx base;
@@ -1971,27 +2799,26 @@ legitimize_pic_address (orig, reg)
base = gen_rtx (PLUS, Pmode, base, XEXP (addr, 0));
addr = XEXP (addr, 1);
}
- return gen_rtx (PLUS, Pmode, base, addr);
+
+ return gen_rtx (PLUS, Pmode, base, addr);
}
return new;
}
-
/* Emit insns to move operands[1] into operands[0]. */
void
emit_pic_move (operands, mode)
rtx *operands;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
rtx temp = reload_in_progress ? operands[0] : gen_reg_rtx (Pmode);
if (GET_CODE (operands[0]) == MEM && SYMBOLIC_CONST (operands[1]))
- operands[1] = (rtx) force_reg (SImode, operands[1]);
+ operands[1] = force_reg (SImode, operands[1]);
else
operands[1] = legitimize_pic_address (operands[1], temp);
}
-
/* Try machine-dependent ways of modifying an illegitimate address
to be legitimate. If we find one, return the new, valid address.
@@ -2017,7 +2844,7 @@ emit_pic_move (operands, mode)
rtx
legitimize_address (x, oldx, mode)
register rtx x;
- register rtx oldx;
+ register rtx oldx ATTRIBUTE_UNUSED;
enum machine_mode mode;
{
int changed = 0;
@@ -2025,7 +2852,8 @@ legitimize_address (x, oldx, mode)
if (TARGET_DEBUG_ADDR)
{
- fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n", GET_MODE_NAME (mode));
+ fprintf (stderr, "\n==========\nLEGITIMIZE_ADDRESS, mode = %s\n",
+ GET_MODE_NAME (mode));
debug_rtx (x);
}
@@ -2038,14 +2866,14 @@ legitimize_address (x, oldx, mode)
&& (log = (unsigned)exact_log2 (INTVAL (XEXP (x, 1)))) < 4)
{
changed = 1;
- x = gen_rtx (MULT, Pmode,
- force_reg (Pmode, XEXP (x, 0)),
+ x = gen_rtx (MULT, Pmode, force_reg (Pmode, XEXP (x, 0)),
GEN_INT (1 << log));
}
if (GET_CODE (x) == PLUS)
{
- /* Canonicalize shifts by 0, 1, 2, 3 into multiply */
+ /* Canonicalize shifts by 0, 1, 2, 3 into multiply. */
+
if (GET_CODE (XEXP (x, 0)) == ASHIFT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
&& (log = (unsigned)exact_log2 (INTVAL (XEXP (XEXP (x, 0), 1)))) < 4)
@@ -2066,7 +2894,7 @@ legitimize_address (x, oldx, mode)
GEN_INT (1 << log));
}
- /* Put multiply first if it isn't already */
+ /* Put multiply first if it isn't already. */
if (GET_CODE (XEXP (x, 1)) == MULT)
{
rtx tmp = XEXP (x, 0);
@@ -2083,18 +2911,21 @@ legitimize_address (x, oldx, mode)
{
changed = 1;
x = gen_rtx (PLUS, Pmode,
- gen_rtx (PLUS, Pmode, XEXP (x, 0), XEXP (XEXP (x, 1), 0)),
+ gen_rtx (PLUS, Pmode, XEXP (x, 0),
+ XEXP (XEXP (x, 1), 0)),
XEXP (XEXP (x, 1), 1));
}
- /* Canonicalize (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
+ /* Canonicalize
+ (plus (plus (mult (reg) (const)) (plus (reg) (const))) const)
into (plus (plus (mult (reg) (const)) (reg)) (const)). */
else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
&& GET_CODE (XEXP (XEXP (x, 0), 1)) == PLUS
&& CONSTANT_P (XEXP (x, 1)))
{
- rtx constant, other;
+ rtx constant;
+ rtx other = NULL_RTX;
if (GET_CODE (XEXP (x, 1)) == CONST_INT)
{
@@ -2173,7 +3004,6 @@ legitimize_address (x, oldx, mode)
return x;
}
-
/* Print an integer constant expression in assembler syntax. Addition
and subtraction are the only arithmetic that may appear in these
@@ -2208,7 +3038,9 @@ output_pic_addr_const (file, x, code)
assemble_name (asm_out_file, buf);
}
- if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
+ if (code == 'X')
+ ; /* No suffix, dammit. */
+ else if (GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
fprintf (file, "@GOTOFF(%%ebx)");
else if (code == 'P')
fprintf (file, "@PLT");
@@ -2227,7 +3059,7 @@ output_pic_addr_const (file, x, code)
break;
case CONST_INT:
- fprintf (file, "%d", INTVAL (x));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
break;
case CONST:
@@ -2241,10 +3073,11 @@ output_pic_addr_const (file, x, code)
{
/* We can use %d if the number is <32 bits and positive. */
if (CONST_DOUBLE_HIGH (x) || CONST_DOUBLE_LOW (x) < 0)
- fprintf (file, "0x%x%08x",
- CONST_DOUBLE_HIGH (x), CONST_DOUBLE_LOW (x));
+ fprintf (file, "0x%lx%08lx",
+ (unsigned long) CONST_DOUBLE_HIGH (x),
+ (unsigned long) CONST_DOUBLE_LOW (x));
else
- fprintf (file, "%d", CONST_DOUBLE_LOW (x));
+ fprintf (file, HOST_WIDE_INT_PRINT_DEC, CONST_DOUBLE_LOW (x));
}
else
/* We can't handle floating point constants;
@@ -2253,20 +3086,20 @@ output_pic_addr_const (file, x, code)
break;
case PLUS:
- /* Some assemblers need integer constants to appear last (eg masm). */
+ /* Some assemblers need integer constants to appear first. */
if (GET_CODE (XEXP (x, 0)) == CONST_INT)
{
- output_pic_addr_const (file, XEXP (x, 1), code);
- if (INTVAL (XEXP (x, 0)) >= 0)
- fprintf (file, "+");
output_pic_addr_const (file, XEXP (x, 0), code);
+ if (INTVAL (XEXP (x, 1)) >= 0)
+ fprintf (file, "+");
+ output_pic_addr_const (file, XEXP (x, 1), code);
}
else
{
- output_pic_addr_const (file, XEXP (x, 0), code);
- if (INTVAL (XEXP (x, 1)) >= 0)
- fprintf (file, "+");
output_pic_addr_const (file, XEXP (x, 1), code);
+ if (INTVAL (XEXP (x, 0)) >= 0)
+ fprintf (file, "+");
+ output_pic_addr_const (file, XEXP (x, 0), code);
}
break;
@@ -2281,16 +3114,138 @@ output_pic_addr_const (file, x, code)
}
}
+/* Append the correct conditional move suffix which corresponds to CODE. */
+
+static void
+put_condition_code (code, reverse_cc, mode, file)
+ enum rtx_code code;
+ int reverse_cc;
+ enum mode_class mode;
+ FILE * file;
+{
+ int ieee = (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI));
+ if (reverse_cc && ! ieee)
+ code = reverse_condition (code);
+
+ if (mode == MODE_INT)
+ switch (code)
+ {
+ case NE:
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ fputs ("b", file);
+ else
+ fputs ("ne", file);
+ return;
+
+ case EQ:
+ if (cc_prev_status.flags & CC_Z_IN_NOT_C)
+ fputs ("ae", file);
+ else
+ fputs ("e", file);
+ return;
+
+ case GE:
+ if (cc_prev_status.flags & CC_NO_OVERFLOW)
+ fputs ("ns", file);
+ else
+ fputs ("ge", file);
+ return;
+
+ case GT:
+ fputs ("g", file);
+ return;
+
+ case LE:
+ fputs ("le", file);
+ return;
+
+ case LT:
+ if (cc_prev_status.flags & CC_NO_OVERFLOW)
+ fputs ("s", file);
+ else
+ fputs ("l", file);
+ return;
+
+ case GEU:
+ fputs ("ae", file);
+ return;
+
+ case GTU:
+ fputs ("a", file);
+ return;
+
+ case LEU:
+ fputs ("be", file);
+ return;
+
+ case LTU:
+ fputs ("b", file);
+ return;
+
+ default:
+ output_operand_lossage ("Invalid %%C operand");
+ }
+
+ else if (mode == MODE_FLOAT)
+ switch (code)
+ {
+ case NE:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "ne", file);
+ return;
+ case EQ:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "e", file);
+ return;
+ case GE:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "nb", file);
+ return;
+ case GT:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "nbe", file);
+ return;
+ case LE:
+ fputs (ieee ? (reverse_cc ? "nb" : "b") : "be", file);
+ return;
+ case LT:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "b", file);
+ return;
+ case GEU:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "nb", file);
+ return;
+ case GTU:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "nbe", file);
+ return;
+ case LEU:
+ fputs (ieee ? (reverse_cc ? "nb" : "b") : "be", file);
+ return;
+ case LTU:
+ fputs (ieee ? (reverse_cc ? "ne" : "e") : "b", file);
+ return;
+ default:
+ output_operand_lossage ("Invalid %%C operand");
+ }
+}
+
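put_condition_code above selects the one- or two-letter suffix that turns a generic "set", "j" or "cmov" mnemonic into a concrete instruction, with signed and unsigned compares getting different suffixes. A small self-contained table in the same spirit; the enum and helper are mine, not GCC's rtx codes:

#include <stdio.h>

/* Hypothetical comparison kinds; GCC uses rtx codes (EQ, NE, ...).  */
enum cmp { CMP_EQ, CMP_NE, CMP_LT, CMP_GE, CMP_LTU, CMP_GEU };

/* Signed compares use the g/l family, unsigned ones a/b, which is
   exactly the distinction the MODE_INT switch above encodes.  */
static const char *
int_suffix (enum cmp c)
{
  switch (c)
    {
    case CMP_EQ:  return "e";
    case CMP_NE:  return "ne";
    case CMP_LT:  return "l";
    case CMP_GE:  return "ge";
    case CMP_LTU: return "b";
    case CMP_GEU: return "ae";
    }
  return "?";
}

int
main (void)
{
  /* Prints "cmovge" and "cmovb".  */
  printf ("cmov%s\ncmov%s\n", int_suffix (CMP_GE), int_suffix (CMP_LTU));
  return 0;
}
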
/* Meaning of CODE:
- f -- float insn (print a CONST_DOUBLE as a float rather than in hex).
- D,L,W,B,Q,S -- print the opcode suffix for specified size of operand.
+ L,W,B,Q,S,T -- print the opcode suffix for specified size of operand.
+ C -- print opcode suffix for set/cmov insn.
+ c -- like C, but print reversed condition
+ F -- print opcode suffix for fcmov insn.
+ f -- like C, but print reversed condition
R -- print the prefix for register names.
z -- print the opcode suffix for the size of the current operand.
* -- print a star (in certain assembler syntax)
w -- print the operand as if it's a "word" (HImode) even if it isn't.
c -- don't print special prefixes before constant operands.
J -- print the appropriate jump operand.
-*/
+  s -- print a shift double count, followed by the assembler's argument
+ delimiter.
+ b -- print the QImode name of the register for the indicated operand.
+ %b0 would print %al if operands[0] is reg 0.
+ w -- likewise, print the HImode name of the register.
+ k -- likewise, print the SImode name of the register.
+ h -- print the QImode name for a "high" register, either ah, bh, ch or dh.
+ y -- print "st(0)" instead of "st" as a register.
+ P -- print as a PIC constant */
void
print_operand (file, x, code)
@@ -2384,6 +3339,7 @@ print_operand (file, x, code)
case 'h':
case 'y':
case 'P':
+ case 'X':
break;
case 'J':
@@ -2403,8 +3359,37 @@ print_operand (file, x, code)
case LTU: fputs ("#branch never", file); return;
/* no matching branches for GT nor LE */
+
+ default:
+ abort ();
+ }
+
+ case 's':
+ if (GET_CODE (x) == CONST_INT || ! SHIFT_DOUBLE_OMITS_COUNT)
+ {
+ PRINT_OPERAND (file, x, 0);
+ fputs (AS2C (,) + 1, file);
}
- abort ();
+
+ return;
+
+ /* This is used by the conditional move instructions. */
+ case 'C':
+ put_condition_code (GET_CODE (x), 0, MODE_INT, file);
+ return;
+
+ /* Like above, but reverse condition */
+ case 'c':
+ put_condition_code (GET_CODE (x), 1, MODE_INT, file); return;
+
+ case 'F':
+ put_condition_code (GET_CODE (x), 0, MODE_FLOAT, file);
+ return;
+
+ /* Like above, but reverse condition */
+ case 'f':
+ put_condition_code (GET_CODE (x), 1, MODE_FLOAT, file);
+ return;
default:
{
@@ -2415,10 +3400,12 @@ print_operand (file, x, code)
}
}
}
+
if (GET_CODE (x) == REG)
{
PRINT_REG (x, code, file);
}
+
else if (GET_CODE (x) == MEM)
{
PRINT_PTR (x, file);
@@ -2432,30 +3419,39 @@ print_operand (file, x, code)
else
output_address (XEXP (x, 0));
}
+
else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == SFmode)
{
- REAL_VALUE_TYPE r; long l;
+ REAL_VALUE_TYPE r;
+ long l;
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
REAL_VALUE_TO_TARGET_SINGLE (r, l);
PRINT_IMMED_PREFIX (file);
- fprintf (file, "0x%x", l);
+ fprintf (file, "0x%lx", l);
}
+
/* These float cases don't actually occur as immediate operands. */
else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == DFmode)
{
- REAL_VALUE_TYPE r; char dstr[30];
+ REAL_VALUE_TYPE r;
+ char dstr[30];
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
fprintf (file, "%s", dstr);
}
+
else if (GET_CODE (x) == CONST_DOUBLE && GET_MODE (x) == XFmode)
{
- REAL_VALUE_TYPE r; char dstr[30];
+ REAL_VALUE_TYPE r;
+ char dstr[30];
+
REAL_VALUE_FROM_CONST_DOUBLE (r, x);
REAL_VALUE_TO_DECIMAL (r, "%.22e", dstr);
fprintf (file, "%s", dstr);
}
- else
+ else
{
if (code != 'P')
{
@@ -2507,38 +3503,35 @@ print_operand_address (file, addr)
offset = XEXP (addr, 1);
addr = XEXP (addr, 0);
}
- if (GET_CODE (addr) != PLUS) ;
+
+ if (GET_CODE (addr) != PLUS)
+ ;
else if (GET_CODE (XEXP (addr, 0)) == MULT)
- {
- reg1 = XEXP (addr, 0);
- addr = XEXP (addr, 1);
- }
+ reg1 = XEXP (addr, 0), addr = XEXP (addr, 1);
else if (GET_CODE (XEXP (addr, 1)) == MULT)
- {
- reg1 = XEXP (addr, 1);
- addr = XEXP (addr, 0);
- }
+ reg1 = XEXP (addr, 1), addr = XEXP (addr, 0);
else if (GET_CODE (XEXP (addr, 0)) == REG)
- {
- reg1 = XEXP (addr, 0);
- addr = XEXP (addr, 1);
- }
+ reg1 = XEXP (addr, 0), addr = XEXP (addr, 1);
else if (GET_CODE (XEXP (addr, 1)) == REG)
- {
- reg1 = XEXP (addr, 1);
- addr = XEXP (addr, 0);
- }
+ reg1 = XEXP (addr, 1), addr = XEXP (addr, 0);
+
if (GET_CODE (addr) == REG || GET_CODE (addr) == MULT)
{
- if (reg1 == 0) reg1 = addr;
- else reg2 = addr;
+ if (reg1 == 0)
+ reg1 = addr;
+ else
+ reg2 = addr;
+
addr = 0;
}
+
if (offset != 0)
{
- if (addr != 0) abort ();
+ if (addr != 0)
+ abort ();
addr = offset;
}
+
if ((reg1 && GET_CODE (reg1) == MULT)
|| (reg2 != 0 && REGNO_OK_FOR_BASE_P (REGNO (reg2))))
{
@@ -2559,10 +3552,8 @@ print_operand_address (file, addr)
{
if (flag_pic)
output_pic_addr_const (file, addr, 0);
-
else if (GET_CODE (addr) == LABEL_REF)
output_asm_label (addr);
-
else
output_addr_const (file, addr);
}
@@ -2593,6 +3584,7 @@ print_operand_address (file, addr)
case MULT:
{
int scale;
+
if (GET_CODE (XEXP (addr, 0)) == CONST_INT)
{
scale = INTVAL (XEXP (addr, 0));
@@ -2603,8 +3595,9 @@ print_operand_address (file, addr)
scale = INTVAL (XEXP (addr, 1));
ireg = XEXP (addr, 0);
}
+
output_addr_const (file, const0_rtx);
- PRINT_B_I_S ((rtx) 0, ireg, scale, file);
+ PRINT_B_I_S (NULL_RTX, ireg, scale, file);
}
break;
@@ -2612,7 +3605,7 @@ print_operand_address (file, addr)
if (GET_CODE (addr) == CONST_INT
&& INTVAL (addr) < 0x8000
&& INTVAL (addr) >= -0x8000)
- fprintf (file, "%d", INTVAL (addr));
+ fprintf (file, "%d", (int) INTVAL (addr));
else
{
if (flag_pic)
@@ -2625,7 +3618,7 @@ print_operand_address (file, addr)
/* Set the cc_status for the results of an insn whose pattern is EXP.
On the 80386, we assume that only test and compare insns, as well
- as SI, HI, & DI mode ADD, SUB, NEG, AND, IOR, XOR, ASHIFT,
+ as SI, HI, & DI mode ADD, SUB, NEG, AND, IOR, XOR, BSF, ASHIFT,
ASHIFTRT, and LSHIFTRT instructions set the condition codes usefully.
Also, we assume that jumps, moves and sCOND don't affect the condition
codes. All else clobbers the condition codes, by assumption.
@@ -2645,40 +3638,51 @@ notice_update_cc (exp)
/* Jumps do not alter the cc's. */
if (SET_DEST (exp) == pc_rtx)
return;
+
/* Moving register or memory into a register:
it doesn't alter the cc's, but it might invalidate
the RTX's which we remember the cc's came from.
(Note that moving a constant 0 or 1 MAY set the cc's). */
if (REG_P (SET_DEST (exp))
&& (REG_P (SET_SRC (exp)) || GET_CODE (SET_SRC (exp)) == MEM
- || GET_RTX_CLASS (GET_CODE (SET_SRC (exp))) == '<'))
+ || GET_RTX_CLASS (GET_CODE (SET_SRC (exp))) == '<'
+ || (GET_CODE (SET_SRC (exp)) == IF_THEN_ELSE
+ && GET_MODE_CLASS (GET_MODE (SET_DEST (exp))) == MODE_INT)))
{
if (cc_status.value1
&& reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value1))
cc_status.value1 = 0;
+
if (cc_status.value2
&& reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value2))
cc_status.value2 = 0;
+
return;
}
+
/* Moving register into memory doesn't alter the cc's.
It may invalidate the RTX's which we remember the cc's came from. */
if (GET_CODE (SET_DEST (exp)) == MEM
&& (REG_P (SET_SRC (exp))
|| GET_RTX_CLASS (GET_CODE (SET_SRC (exp))) == '<'))
{
- if (cc_status.value1 && GET_CODE (cc_status.value1) == MEM)
+ if (cc_status.value1
+ && reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value1))
cc_status.value1 = 0;
- if (cc_status.value2 && GET_CODE (cc_status.value2) == MEM)
+ if (cc_status.value2
+ && reg_overlap_mentioned_p (SET_DEST (exp), cc_status.value2))
cc_status.value2 = 0;
+
return;
}
+
/* Function calls clobber the cc's. */
else if (GET_CODE (SET_SRC (exp)) == CALL)
{
CC_STATUS_INIT;
return;
}
+
/* Tests and compares set the cc's in predictable ways. */
else if (SET_DEST (exp) == cc0_rtx)
{
@@ -2686,14 +3690,14 @@ notice_update_cc (exp)
cc_status.value1 = SET_SRC (exp);
return;
}
+
	  /* Certain instructions affect the condition codes.  */
else if (GET_MODE (SET_SRC (exp)) == SImode
|| GET_MODE (SET_SRC (exp)) == HImode
|| GET_MODE (SET_SRC (exp)) == QImode)
switch (GET_CODE (SET_SRC (exp)))
{
- case ASHIFTRT: case LSHIFTRT:
- case ASHIFT:
+ case ASHIFTRT: case LSHIFTRT: case ASHIFT:
/* Shifts on the 386 don't set the condition codes if the
shift count is zero. */
if (GET_CODE (XEXP (SET_SRC (exp), 1)) != CONST_INT)
@@ -2701,6 +3705,7 @@ notice_update_cc (exp)
CC_STATUS_INIT;
break;
}
+
/* We assume that the CONST_INT is non-zero (this rtx would
have been deleted if it were zero. */
@@ -2711,6 +3716,19 @@ notice_update_cc (exp)
cc_status.value2 = SET_DEST (exp);
break;
+ /* This is the bsf pattern used by ffs. */
+ case UNSPEC:
+ if (XINT (SET_SRC (exp), 1) == 5)
+ {
+ /* Only the Z flag is defined after bsf. */
+ cc_status.flags
+ = CC_NOT_POSITIVE | CC_NOT_NEGATIVE | CC_NO_OVERFLOW;
+ cc_status.value1 = XVECEXP (SET_SRC (exp), 0, 0);
+ cc_status.value2 = 0;
+ break;
+ }
+ /* FALLTHRU */
+
default:
CC_STATUS_INIT;
}
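
The UNSPEC case above is the bsf pattern behind ffs; bsf leaves only the zero flag defined, which is why nothing but the Z-related flags is kept. For reference, a portable definition of the ffs result that pattern computes, as a sketch independent of the insn:

#include <stdio.h>

/* Reference ffs: 1-based position of the lowest set bit, 0 if x == 0.
   bsf yields the 0-based position and defines ZF only for x == 0,
   which is all the condition-code bookkeeping above preserves.  */
static int
ffs_ref (unsigned int x)
{
  int pos = 1;
  if (x == 0)
    return 0;
  while (!(x & 1))
    {
      x >>= 1;
      pos++;
    }
  return pos;
}

int
main (void)
{
  printf ("%d %d %d\n", ffs_ref (0), ffs_ref (1), ffs_ref (40)); /* 0 1 4 */
  return 0;
}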
@@ -2725,14 +3743,21 @@ notice_update_cc (exp)
if (SET_DEST (XVECEXP (exp, 0, 0)) == pc_rtx)
return;
if (SET_DEST (XVECEXP (exp, 0, 0)) == cc0_rtx)
+
{
CC_STATUS_INIT;
- if (stack_regs_mentioned_p (SET_SRC (XVECEXP (exp, 0, 0))))
- cc_status.flags |= CC_IN_80387;
+ if (stack_regs_mentioned_p (SET_SRC (XVECEXP (exp, 0, 0))))
+ {
+ cc_status.flags |= CC_IN_80387;
+ if (0 && TARGET_CMOVE && stack_regs_mentioned_p
+ (XEXP (SET_SRC (XVECEXP (exp, 0, 0)), 1)))
+ cc_status.flags |= CC_FCOMI;
+ }
else
cc_status.value1 = SET_SRC (XVECEXP (exp, 0, 0));
return;
}
+
CC_STATUS_INIT;
}
else
@@ -2755,19 +3780,20 @@ split_di (operands, num, lo_half, hi_half)
{
while (num--)
{
- if (GET_CODE (operands[num]) == REG)
- {
- lo_half[num] = gen_rtx (REG, SImode, REGNO (operands[num]));
- hi_half[num] = gen_rtx (REG, SImode, REGNO (operands[num]) + 1);
- }
- else if (CONSTANT_P (operands[num]))
+ rtx op = operands[num];
+ if (GET_CODE (op) == REG)
{
- split_double (operands[num], &lo_half[num], &hi_half[num]);
+ lo_half[num] = gen_rtx_REG (SImode, REGNO (op));
+ hi_half[num] = gen_rtx_REG (SImode, REGNO (op) + 1);
}
- else if (offsettable_memref_p (operands[num]))
+ else if (CONSTANT_P (op))
+ split_double (op, &lo_half[num], &hi_half[num]);
+ else if (offsettable_memref_p (op))
{
- lo_half[num] = operands[num];
- hi_half[num] = adj_offsettable_operand (operands[num], 4);
+ rtx lo_addr = XEXP (op, 0);
+ rtx hi_addr = XEXP (adj_offsettable_operand (op, 4), 0);
+ lo_half[num] = change_address (op, SImode, lo_addr);
+ hi_half[num] = change_address (op, SImode, hi_addr);
}
else
abort();
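
split_di above hands back SImode views of the low and high halves of each DImode operand, with constants handled by split_double and memory by a 4-byte offset. Assuming the i386's little-endian layout, the constant case is just the two 32-bit words; a self-contained illustration on plain integers (no RTL, and modern C types used only for brevity):

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit constant the way a little-endian DImode operand is
   split above: low word first, high word second.  */
static void
split_u64 (uint64_t v, uint32_t *lo, uint32_t *hi)
{
  *lo = (uint32_t) (v & 0xffffffffu);
  *hi = (uint32_t) (v >> 32);
}

int
main (void)
{
  uint32_t lo, hi;
  split_u64 (0x0123456789abcdefULL, &lo, &hi);
  /* Prints lo=0x89abcdef hi=0x01234567.  */
  printf ("lo=0x%08x hi=0x%08x\n", (unsigned) lo, (unsigned) hi);
  return 0;
}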
@@ -2797,7 +3823,6 @@ binary_387_op (op, mode)
return 0;
}
}
-
/* Return 1 if this is a valid shift or rotate operation on a 386.
OP is the expression matched, and MODE is its mode. */
@@ -2829,7 +3854,7 @@ shift_op (op, mode)
int
VOIDmode_compare_op (op, mode)
register rtx op;
- enum machine_mode mode;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
{
return GET_CODE (op) == COMPARE && GET_MODE (op) == VOIDmode;
}
@@ -2908,16 +3933,22 @@ output_387_binary_op (insn, operands)
if (NON_STACK_REG_P (operands[1]))
{
output_op_from_reg (operands[1], strcat (buf, AS1 (%z0,%1)));
- RET;
+ return "";
}
+
else if (NON_STACK_REG_P (operands[2]))
{
output_op_from_reg (operands[2], strcat (buf, AS1 (%z0,%1)));
- RET;
+ return "";
}
if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
- return strcat (buf, AS2 (p,%2,%0));
+ {
+ if (STACK_TOP_P (operands[0]))
+ return strcat (buf, AS2 (p,%0,%2));
+ else
+ return strcat (buf, AS2 (p,%2,%0));
+ }
if (STACK_TOP_P (operands[0]))
return strcat (buf, AS2C (%y2,%0));
@@ -2935,22 +3966,33 @@ output_387_binary_op (insn, operands)
if (NON_STACK_REG_P (operands[1]))
{
output_op_from_reg (operands[1], strcat (buf, AS1 (r%z0,%1)));
- RET;
+ return "";
}
+
else if (NON_STACK_REG_P (operands[2]))
{
output_op_from_reg (operands[2], strcat (buf, AS1 (%z0,%1)));
- RET;
+ return "";
}
if (! STACK_REG_P (operands[1]) || ! STACK_REG_P (operands[2]))
abort ();
if (find_regno_note (insn, REG_DEAD, REGNO (operands[2])))
- return strcat (buf, AS2 (rp,%2,%0));
+ {
+ if (STACK_TOP_P (operands[0]))
+ return strcat (buf, AS2 (p,%0,%2));
+ else
+ return strcat (buf, AS2 (rp,%2,%0));
+ }
if (find_regno_note (insn, REG_DEAD, REGNO (operands[1])))
- return strcat (buf, AS2 (p,%1,%0));
+ {
+ if (STACK_TOP_P (operands[0]))
+ return strcat (buf, AS2 (rp,%0,%1));
+ else
+ return strcat (buf, AS2 (p,%1,%0));
+ }
if (STACK_TOP_P (operands[0]))
{
@@ -2983,8 +4025,7 @@ output_fix_trunc (insn, operands)
int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
rtx xops[2];
- if (! STACK_TOP_P (operands[1]) ||
- (GET_MODE (operands[0]) == DImode && ! stack_top_dies))
+ if (! STACK_TOP_P (operands[1]))
abort ();
xops[0] = GEN_INT (12);
@@ -2997,11 +4038,23 @@ output_fix_trunc (insn, operands)
output_asm_insn (AS1 (fldc%W3,%3), operands);
if (NON_STACK_REG_P (operands[0]))
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, operands[3]);
+
else if (GET_CODE (operands[0]) == MEM)
{
if (stack_top_dies)
output_asm_insn (AS1 (fistp%z0,%0), operands);
+ else if (GET_MODE (operands[0]) == DImode && ! stack_top_dies)
+ {
+ /* There is no DImode version of this without a stack pop, so
+ we must emulate it. It doesn't matter much what the second
+ instruction is, because the value being pushed on the FP stack
+ is not used except for the following stack popping store.
+ This case can only happen without optimization, so it doesn't
+ matter that it is inefficient. */
+ output_asm_insn (AS1 (fistp%z0,%0), operands);
+ output_asm_insn (AS1 (fild%z0,%0), operands);
+ }
else
output_asm_insn (AS1 (fist%z0,%0), operands);
}
@@ -3024,6 +4077,21 @@ output_float_compare (insn, operands)
int stack_top_dies;
rtx body = XVECEXP (PATTERN (insn), 0, 0);
int unordered_compare = GET_MODE (SET_SRC (body)) == CCFPEQmode;
+ rtx tmp;
+
+ if (0 && TARGET_CMOVE && STACK_REG_P (operands[1]))
+ {
+ cc_status.flags |= CC_FCOMI;
+ cc_prev_status.flags &= ~CC_TEST_AX;
+ }
+
+ if (! STACK_TOP_P (operands[0]))
+ {
+ tmp = operands[0];
+ operands[0] = operands[1];
+ operands[1] = tmp;
+ cc_status.flags |= CC_REVERSED;
+ }
if (! STACK_TOP_P (operands[0]))
abort ();
@@ -3040,9 +4108,27 @@ output_float_compare (insn, operands)
`fcompp' float compare */
if (unordered_compare)
- output_asm_insn ("fucompp", operands);
+ {
+ if (cc_status.flags & CC_FCOMI)
+ {
+ output_asm_insn (AS2 (fucomip,%y1,%0), operands);
+ output_asm_insn (AS1 (fstp, %y0), operands);
+ return "";
+ }
+ else
+ output_asm_insn ("fucompp", operands);
+ }
else
- output_asm_insn ("fcompp", operands);
+ {
+ if (cc_status.flags & CC_FCOMI)
+ {
+ output_asm_insn (AS2 (fcomip, %y1,%0), operands);
+ output_asm_insn (AS1 (fstp, %y0), operands);
+ return "";
+ }
+ else
+ output_asm_insn ("fcompp", operands);
+ }
}
else
{
@@ -3052,9 +4138,9 @@ output_float_compare (insn, operands)
unordered float compare. */
if (unordered_compare)
- strcpy (buf, "fucom");
+ strcpy (buf, (cc_status.flags & CC_FCOMI) ? "fucomi" : "fucom");
else if (GET_MODE_CLASS (GET_MODE (operands[1])) == MODE_FLOAT)
- strcpy (buf, "fcom");
+ strcpy (buf, (cc_status.flags & CC_FCOMI) ? "fcomi" : "fcom");
else
strcpy (buf, "ficom");
@@ -3065,6 +4151,11 @@ output_float_compare (insn, operands)
if (NON_STACK_REG_P (operands[1]))
output_op_from_reg (operands[1], strcat (buf, AS1 (%z0,%1)));
+ else if (cc_status.flags & CC_FCOMI)
+ {
+ output_asm_insn (strcat (buf, AS2 (%z1,%y1,%0)), operands);
+ return "";
+ }
else
output_asm_insn (strcat (buf, AS1 (%z1,%y1)), operands);
}
@@ -3084,15 +4175,39 @@ output_fp_cc0_set (insn)
rtx insn;
{
rtx xops[3];
- rtx unordered_label;
rtx next;
enum rtx_code code;
- xops[0] = gen_rtx (REG, HImode, 0);
+ xops[0] = gen_rtx_REG (HImode, 0);
output_asm_insn (AS1 (fnsts%W0,%0), xops);
if (! TARGET_IEEE_FP)
- return "sahf";
+ {
+ if (!(cc_status.flags & CC_REVERSED))
+ {
+ next = next_cc0_user (insn);
+
+ if (GET_CODE (next) == JUMP_INSN
+ && GET_CODE (PATTERN (next)) == SET
+ && SET_DEST (PATTERN (next)) == pc_rtx
+ && GET_CODE (SET_SRC (PATTERN (next))) == IF_THEN_ELSE)
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (next)), 0));
+ else if (GET_CODE (PATTERN (next)) == SET)
+ code = GET_CODE (SET_SRC (PATTERN (next)));
+ else
+ return "sahf";
+
+ if (code == GT || code == LT || code == EQ || code == NE
+ || code == LE || code == GE)
+ {
+ /* We will test eax directly. */
+ cc_status.flags |= CC_TEST_AX;
+ return "";
+ }
+ }
+
+ return "sahf";
+ }
next = next_cc0_user (insn);
if (next == NULL_RTX)
@@ -3102,17 +4217,27 @@ output_fp_cc0_set (insn)
&& GET_CODE (PATTERN (next)) == SET
&& SET_DEST (PATTERN (next)) == pc_rtx
&& GET_CODE (SET_SRC (PATTERN (next))) == IF_THEN_ELSE)
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (next)), 0));
+ else if (GET_CODE (PATTERN (next)) == SET)
{
- code = GET_CODE (XEXP (SET_SRC (PATTERN (next)), 0));
+ if (GET_CODE (SET_SRC (PATTERN (next))) == IF_THEN_ELSE)
+ code = GET_CODE (XEXP (SET_SRC (PATTERN (next)), 0));
+ else
+ code = GET_CODE (SET_SRC (PATTERN (next)));
}
- else if (GET_CODE (PATTERN (next)) == SET)
+
+ else if (GET_CODE (PATTERN (next)) == PARALLEL
+ && GET_CODE (XVECEXP (PATTERN (next), 0, 0)) == SET)
{
- code = GET_CODE (SET_SRC (PATTERN (next)));
+ if (GET_CODE (SET_SRC (XVECEXP (PATTERN (next), 0, 0))) == IF_THEN_ELSE)
+ code = GET_CODE (XEXP (SET_SRC (XVECEXP (PATTERN (next), 0, 0)), 0));
+ else
+ code = GET_CODE (SET_SRC (XVECEXP (PATTERN (next), 0, 0)));
}
else
abort ();
- xops[0] = gen_rtx (REG, QImode, 0);
+ xops[0] = gen_rtx_REG (QImode, 0);
switch (code)
{
@@ -3168,7 +4293,8 @@ output_fp_cc0_set (insn)
default:
abort ();
}
- RET;
+
+ return "";
}
#define MAX_386_STACK_LOCALS 2
@@ -3179,6 +4305,8 @@ static rtx i386_stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
struct machine_function
{
rtx i386_stack_locals[(int) MAX_MACHINE_MODE][MAX_386_STACK_LOCALS];
+ rtx pic_label_rtx;
+ char pic_label_name[256];
};
/* Functions to save and restore i386_stack_locals.
@@ -3189,9 +4317,12 @@ void
save_386_machine_status (p)
struct function *p;
{
- p->machine = (struct machine_function *) xmalloc (sizeof i386_stack_locals);
+ p->machine
+ = (struct machine_function *) xmalloc (sizeof (struct machine_function));
bcopy ((char *) i386_stack_locals, (char *) p->machine->i386_stack_locals,
sizeof i386_stack_locals);
+ p->machine->pic_label_rtx = pic_label_rtx;
+ bcopy (pic_label_name, p->machine->pic_label_name, 256);
}
void
@@ -3200,7 +4331,10 @@ restore_386_machine_status (p)
{
bcopy ((char *) p->machine->i386_stack_locals, (char *) i386_stack_locals,
sizeof i386_stack_locals);
+ pic_label_rtx = p->machine->pic_label_rtx;
+ bcopy (p->machine->pic_label_name, pic_label_name, 256);
free (p->machine);
+ p->machine = NULL;
}
/* Clear stack slot assignments remembered from previous functions.
@@ -3218,6 +4352,8 @@ clear_386_stack_locals ()
for (n = 0; n < MAX_386_STACK_LOCALS; n++)
i386_stack_locals[(int) mode][n] = NULL_RTX;
+ pic_label_rtx = NULL_RTX;
+ bzero (pic_label_name, 256);
/* Arrange to save and restore i386_stack_locals around nested functions. */
save_machine_status = save_386_machine_status;
restore_machine_status = restore_386_machine_status;
@@ -3243,3 +4379,897 @@ assign_386_stack_local (mode, n)
return i386_stack_locals[(int) mode][n];
}
+
+int
+is_mul (op, mode)
+     register rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  return (GET_CODE (op) == MULT);
+}
+
+int
+is_div (op, mode)
+     register rtx op;
+     enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+  return (GET_CODE (op) == DIV);
+}
+
+#ifdef NOTYET
+/* Create a new copy of an rtx.
+ Recursively copies the operands of the rtx,
+ except for those few rtx codes that are sharable.
+ Doesn't share CONST */
+
+rtx
+copy_all_rtx (orig)
+ register rtx orig;
+{
+ register rtx copy;
+ register int i, j;
+ register RTX_CODE code;
+ register char *format_ptr;
+
+ code = GET_CODE (orig);
+
+ switch (code)
+ {
+ case REG:
+ case QUEUED:
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case CODE_LABEL:
+ case PC:
+ case CC0:
+ case SCRATCH:
+ /* SCRATCH must be shared because they represent distinct values. */
+ return orig;
+
+#if 0
+ case CONST:
+ /* CONST can be shared if it contains a SYMBOL_REF. If it contains
+ a LABEL_REF, it isn't sharable. */
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (orig, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (orig, 0), 1)) == CONST_INT)
+ return orig;
+ break;
+#endif
+ /* A MEM with a constant address is not sharable. The problem is that
+ the constant address may need to be reloaded. If the mem is shared,
+ then reloading one copy of this mem will cause all copies to appear
+ to have been reloaded. */
+ }
+
+ copy = rtx_alloc (code);
+ PUT_MODE (copy, GET_MODE (orig));
+ copy->in_struct = orig->in_struct;
+ copy->volatil = orig->volatil;
+ copy->unchanging = orig->unchanging;
+ copy->integrated = orig->integrated;
+ /* intel1 */
+ copy->is_spill_rtx = orig->is_spill_rtx;
+
+ format_ptr = GET_RTX_FORMAT (GET_CODE (copy));
+
+ for (i = 0; i < GET_RTX_LENGTH (GET_CODE (copy)); i++)
+ {
+ switch (*format_ptr++)
+ {
+ case 'e':
+ XEXP (copy, i) = XEXP (orig, i);
+ if (XEXP (orig, i) != NULL)
+ XEXP (copy, i) = copy_rtx (XEXP (orig, i));
+ break;
+
+ case '0':
+ case 'u':
+ XEXP (copy, i) = XEXP (orig, i);
+ break;
+
+ case 'E':
+ case 'V':
+ XVEC (copy, i) = XVEC (orig, i);
+ if (XVEC (orig, i) != NULL)
+ {
+ XVEC (copy, i) = rtvec_alloc (XVECLEN (orig, i));
+ for (j = 0; j < XVECLEN (copy, i); j++)
+ XVECEXP (copy, i, j) = copy_rtx (XVECEXP (orig, i, j));
+ }
+ break;
+
+ case 'w':
+ XWINT (copy, i) = XWINT (orig, i);
+ break;
+
+ case 'i':
+ XINT (copy, i) = XINT (orig, i);
+ break;
+
+ case 's':
+ case 'S':
+ XSTR (copy, i) = XSTR (orig, i);
+ break;
+
+ default:
+ abort ();
+ }
+ }
+ return copy;
+}
+
+
+/* Try to rewrite a memory address to make it valid */
+
+void
+rewrite_address (mem_rtx)
+ rtx mem_rtx;
+{
+ rtx index_rtx, base_rtx, offset_rtx, scale_rtx, ret_rtx;
+ int scale = 1;
+ int offset_adjust = 0;
+ int was_only_offset = 0;
+ rtx mem_addr = XEXP (mem_rtx, 0);
+ char *storage = oballoc (0);
+ int in_struct = 0;
+ int is_spill_rtx = 0;
+
+ in_struct = MEM_IN_STRUCT_P (mem_rtx);
+ is_spill_rtx = RTX_IS_SPILL_P (mem_rtx);
+
+ if (GET_CODE (mem_addr) == PLUS
+ && GET_CODE (XEXP (mem_addr, 1)) == PLUS
+ && GET_CODE (XEXP (XEXP (mem_addr, 1), 0)) == REG)
+ {
+ /* This part is utilized by the combiner. */
+ ret_rtx
+ = gen_rtx (PLUS, GET_MODE (mem_addr),
+ gen_rtx (PLUS, GET_MODE (XEXP (mem_addr, 1)),
+ XEXP (mem_addr, 0), XEXP (XEXP (mem_addr, 1), 0)),
+ XEXP (XEXP (mem_addr, 1), 1));
+
+ if (memory_address_p (GET_MODE (mem_rtx), ret_rtx))
+ {
+ XEXP (mem_rtx, 0) = ret_rtx;
+ RTX_IS_SPILL_P (ret_rtx) = is_spill_rtx;
+ return;
+ }
+
+ obfree (storage);
+ }
+
+ /* This part is utilized by loop.c.
+ If the address contains PLUS (reg,const) and this pattern is invalid
+     in this case, try to rewrite the address to make it valid.  */
+ storage = oballoc (0);
+ index_rtx = base_rtx = offset_rtx = NULL;
+
+ /* Find the base index and offset elements of the memory address. */
+ if (GET_CODE (mem_addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (mem_addr, 0)) == REG)
+ {
+ if (GET_CODE (XEXP (mem_addr, 1)) == REG)
+ base_rtx = XEXP (mem_addr, 1), index_rtx = XEXP (mem_addr, 0);
+ else
+ base_rtx = XEXP (mem_addr, 0), offset_rtx = XEXP (mem_addr, 1);
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == MULT)
+ {
+ index_rtx = XEXP (mem_addr, 0);
+ if (GET_CODE (XEXP (mem_addr, 1)) == REG)
+ base_rtx = XEXP (mem_addr, 1);
+ else
+ offset_rtx = XEXP (mem_addr, 1);
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == PLUS)
+ {
+ if (GET_CODE (XEXP (XEXP (mem_addr, 0), 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0)) == MULT
+ && (GET_CODE (XEXP (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0), 0))
+ == REG)
+ && (GET_CODE (XEXP (XEXP (XEXP (XEXP (mem_addr, 0), 0), 0), 1))
+ == CONST_INT)
+ && (GET_CODE (XEXP (XEXP (XEXP (mem_addr, 0), 0), 1))
+ == CONST_INT)
+ && GET_CODE (XEXP (XEXP (mem_addr, 0), 1)) == REG
+ && GET_CODE (XEXP (mem_addr, 1)) == SYMBOL_REF)
+ {
+ index_rtx = XEXP (XEXP (XEXP (mem_addr, 0), 0), 0);
+ offset_rtx = XEXP (mem_addr, 1);
+ base_rtx = XEXP (XEXP (mem_addr, 0), 1);
+ offset_adjust = INTVAL (XEXP (XEXP (XEXP (mem_addr, 0), 0), 1));
+ }
+ else
+ {
+ offset_rtx = XEXP (mem_addr, 1);
+ index_rtx = XEXP (XEXP (mem_addr, 0), 0);
+ base_rtx = XEXP (XEXP (mem_addr, 0), 1);
+ }
+ }
+
+ else if (GET_CODE (XEXP (mem_addr, 0)) == CONST_INT)
+ {
+ was_only_offset = 1;
+ index_rtx = NULL;
+ base_rtx = NULL;
+ offset_rtx = XEXP (mem_addr, 1);
+ offset_adjust = INTVAL (XEXP (mem_addr, 0));
+ if (offset_adjust == 0)
+ {
+ XEXP (mem_rtx, 0) = offset_rtx;
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ return;
+ }
+ }
+ else
+ {
+ obfree (storage);
+ return;
+ }
+ }
+ else if (GET_CODE (mem_addr) == MULT)
+ index_rtx = mem_addr;
+ else
+ {
+ obfree (storage);
+ return;
+ }
+
+ if (index_rtx != 0 && GET_CODE (index_rtx) == MULT)
+ {
+ if (GET_CODE (XEXP (index_rtx, 1)) != CONST_INT)
+ {
+ obfree (storage);
+ return;
+ }
+
+ scale_rtx = XEXP (index_rtx, 1);
+ scale = INTVAL (scale_rtx);
+ index_rtx = copy_all_rtx (XEXP (index_rtx, 0));
+ }
+
+ /* Now find which of the elements are invalid and try to fix them. */
+ if (index_rtx && GET_CODE (index_rtx) == CONST_INT && base_rtx == NULL)
+ {
+ offset_adjust = INTVAL (index_rtx) * scale;
+
+ if (offset_rtx != 0 && CONSTANT_P (offset_rtx))
+ offset_rtx = plus_constant (offset_rtx, offset_adjust);
+ else if (offset_rtx == 0)
+ offset_rtx = const0_rtx;
+
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ XEXP (mem_rtx, 0) = offset_rtx;
+ return;
+ }
+
+ if (base_rtx && GET_CODE (base_rtx) == PLUS
+ && GET_CODE (XEXP (base_rtx, 0)) == REG
+ && GET_CODE (XEXP (base_rtx, 1)) == CONST_INT)
+ {
+ offset_adjust += INTVAL (XEXP (base_rtx, 1));
+ base_rtx = copy_all_rtx (XEXP (base_rtx, 0));
+ }
+
+ else if (base_rtx && GET_CODE (base_rtx) == CONST_INT)
+ {
+ offset_adjust += INTVAL (base_rtx);
+ base_rtx = NULL;
+ }
+
+ if (index_rtx && GET_CODE (index_rtx) == PLUS
+ && GET_CODE (XEXP (index_rtx, 0)) == REG
+ && GET_CODE (XEXP (index_rtx, 1)) == CONST_INT)
+ {
+ offset_adjust += INTVAL (XEXP (index_rtx, 1)) * scale;
+ index_rtx = copy_all_rtx (XEXP (index_rtx, 0));
+ }
+
+ if (index_rtx)
+ {
+ if (! LEGITIMATE_INDEX_P (index_rtx)
+ && ! (index_rtx == stack_pointer_rtx && scale == 1
+ && base_rtx == NULL))
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
+ if (base_rtx)
+ {
+ if (! LEGITIMATE_INDEX_P (base_rtx) && GET_CODE (base_rtx) != REG)
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
+ if (offset_adjust != 0)
+ {
+ if (offset_rtx != 0 && CONSTANT_P (offset_rtx))
+ offset_rtx = plus_constant (offset_rtx, offset_adjust);
+ else
+ offset_rtx = const0_rtx;
+
+ if (index_rtx)
+ {
+ if (base_rtx)
+ {
+ if (scale != 1)
+ {
+ ret_rtx = gen_rtx (PLUS, GET_MODE (base_rtx),
+ gen_rtx (MULT, GET_MODE (index_rtx),
+ index_rtx, scale_rtx),
+ base_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ else
+ {
+ ret_rtx = gen_rtx (PLUS, GET_MODE (index_rtx),
+ index_rtx, base_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ }
+ else
+ {
+ if (scale != 1)
+ {
+ ret_rtx = gen_rtx (MULT, GET_MODE (index_rtx),
+ index_rtx, scale_rtx);
+
+ if (GET_CODE (offset_rtx) != CONST_INT
+ || INTVAL (offset_rtx) != 0)
+ ret_rtx = gen_rtx (PLUS, GET_MODE (ret_rtx),
+ ret_rtx, offset_rtx);
+ }
+ else
+ {
+ if (GET_CODE (offset_rtx) == CONST_INT
+ && INTVAL (offset_rtx) == 0)
+ ret_rtx = index_rtx;
+ else
+ ret_rtx = gen_rtx (PLUS, GET_MODE (index_rtx),
+ index_rtx, offset_rtx);
+ }
+ }
+ }
+ else
+ {
+ if (base_rtx)
+ {
+ if (GET_CODE (offset_rtx) == CONST_INT
+ && INTVAL (offset_rtx) == 0)
+ ret_rtx = base_rtx;
+ else
+ ret_rtx = gen_rtx (PLUS, GET_MODE (base_rtx), base_rtx,
+ offset_rtx);
+ }
+ else if (was_only_offset)
+ ret_rtx = offset_rtx;
+ else
+ {
+ obfree (storage);
+ return;
+ }
+ }
+
+ XEXP (mem_rtx, 0) = ret_rtx;
+ RTX_IS_SPILL_P (XEXP (mem_rtx, 0)) = is_spill_rtx;
+ return;
+ }
+ else
+ {
+ obfree (storage);
+ return;
+ }
+}
+#endif /* NOTYET */
+
+/* Return 1 if the first insn to set cc before INSN also sets the register
+ REG_RTX; otherwise return 0. */
+int
+last_to_set_cc (reg_rtx, insn)
+ rtx reg_rtx, insn;
+{
+ rtx prev_insn = PREV_INSN (insn);
+
+ while (prev_insn)
+ {
+ if (GET_CODE (prev_insn) == NOTE)
+ ;
+
+ else if (GET_CODE (prev_insn) == INSN)
+ {
+ if (GET_CODE (PATTERN (prev_insn)) != SET)
+ return (0);
+
+ if (rtx_equal_p (SET_DEST (PATTERN (prev_insn)), reg_rtx))
+ {
+ if (sets_condition_code (SET_SRC (PATTERN (prev_insn))))
+ return (1);
+
+ return (0);
+ }
+
+ else if (! doesnt_set_condition_code (SET_SRC (PATTERN (prev_insn))))
+ return (0);
+ }
+
+ else
+ return (0);
+
+ prev_insn = PREV_INSN (prev_insn);
+ }
+
+ return (0);
+}
+
+int
+doesnt_set_condition_code (pat)
+ rtx pat;
+{
+ switch (GET_CODE (pat))
+ {
+ case MEM:
+ case REG:
+ return 1;
+
+ default:
+ return 0;
+
+ }
+}
+
+int
+sets_condition_code (pat)
+ rtx pat;
+{
+ switch (GET_CODE (pat))
+ {
+ case PLUS:
+ case MINUS:
+ case AND:
+ case IOR:
+ case XOR:
+ case NOT:
+ case NEG:
+ case MULT:
+ case DIV:
+ case MOD:
+ case UDIV:
+ case UMOD:
+ return 1;
+
+ default:
+ return (0);
+ }
+}
+
+int
+str_immediate_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == CONST_INT && INTVAL (op) <= 32 && INTVAL (op) >= 0)
+ return 1;
+
+ return 0;
+}
+
+int
+is_fp_insn (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode))
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if the mode of the SET_DEST of insn is floating point
+ and it is not an fld or a move from memory to memory.
+ Otherwise return 0 */
+
+int
+is_fp_dest (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode)
+ && GET_CODE (SET_DEST (PATTERN (insn))) == REG
+ && REGNO (SET_DEST (PATTERN (insn))) >= FIRST_FLOAT_REG
+ && GET_CODE (SET_SRC (PATTERN (insn))) != MEM)
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if the mode of the SET_DEST of INSN is floating point and is
+ memory and the source is a register. */
+
+int
+is_fp_store (insn)
+ rtx insn;
+{
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SET
+ && (GET_MODE (SET_DEST (PATTERN (insn))) == DFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == SFmode
+ || GET_MODE (SET_DEST (PATTERN (insn))) == XFmode)
+ && GET_CODE (SET_DEST (PATTERN (insn))) == MEM
+ && GET_CODE (SET_SRC (PATTERN (insn))) == REG)
+ return 1;
+
+ return 0;
+}
+
+/* Return 1 if DEP_INSN sets a register which INSN uses as a base
+ or index to reference memory.
+   Otherwise return 0.  */
+
+int
+agi_dependent (insn, dep_insn)
+ rtx insn, dep_insn;
+{
+ if (GET_CODE (dep_insn) == INSN
+ && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (dep_insn))) == REG)
+ return reg_mentioned_in_mem (SET_DEST (PATTERN (dep_insn)), insn);
+
+ if (GET_CODE (dep_insn) == INSN && GET_CODE (PATTERN (dep_insn)) == SET
+ && GET_CODE (SET_DEST (PATTERN (dep_insn))) == MEM
+ && push_operand (SET_DEST (PATTERN (dep_insn)),
+ GET_MODE (SET_DEST (PATTERN (dep_insn)))))
+ return reg_mentioned_in_mem (stack_pointer_rtx, insn);
+
+ return 0;
+}
+
+/* Return 1 if REG is used in RTL as a base or index for a memory ref;
+   otherwise return 0.  */
+
+int
+reg_mentioned_in_mem (reg, rtl)
+ rtx reg, rtl;
+{
+ register char *fmt;
+ register int i, j;
+ register enum rtx_code code;
+
+ if (rtl == NULL)
+ return 0;
+
+ code = GET_CODE (rtl);
+
+ switch (code)
+ {
+ case HIGH:
+ case CONST_INT:
+ case CONST:
+ case CONST_DOUBLE:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ case PC:
+ case CC0:
+ case SUBREG:
+ return 0;
+ default:
+ break;
+ }
+
+ if (code == MEM && reg_mentioned_p (reg, rtl))
+ return 1;
+
+ fmt = GET_RTX_FORMAT (code);
+ for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
+ {
+ if (fmt[i] == 'E')
+ {
+ for (j = XVECLEN (rtl, i) - 1; j >= 0; j--)
+ if (reg_mentioned_in_mem (reg, XVECEXP (rtl, i, j)))
+ return 1;
+ }
+
+ else if (fmt[i] == 'e' && reg_mentioned_in_mem (reg, XEXP (rtl, i)))
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Output the appropriate insns for doing strlen if not just doing repnz; scasb
+
+   operands[0] = result, initialized with the start address
+   operands[1] = alignment of the address.
+   operands[2] = scratch register, initialized with the start address when
+		  not aligned, otherwise undefined
+
+ This is just the body. It needs the initialisations mentioned above and
+ some address computing at the end. These things are done in i386.md. */
+
+char *
+output_strlen_unroll (operands)
+ rtx operands[];
+{
+ rtx xops[18];
+
+ xops[0] = operands[0]; /* Result */
+ /* operands[1]; * Alignment */
+ xops[1] = operands[2]; /* Scratch */
+ xops[2] = GEN_INT (0);
+ xops[3] = GEN_INT (2);
+ xops[4] = GEN_INT (3);
+ xops[5] = GEN_INT (4);
+ /* xops[6] = gen_label_rtx (); * label when aligned to 3-byte */
+ /* xops[7] = gen_label_rtx (); * label when aligned to 2-byte */
+ xops[8] = gen_label_rtx (); /* label of main loop */
+
+ if (TARGET_USE_Q_REG && QI_REG_P (xops[1]))
+ xops[9] = gen_label_rtx (); /* pentium optimisation */
+
+ xops[10] = gen_label_rtx (); /* end label 2 */
+ xops[11] = gen_label_rtx (); /* end label 1 */
+ xops[12] = gen_label_rtx (); /* end label */
+ /* xops[13] * Temporary used */
+ xops[14] = GEN_INT (0xff);
+ xops[15] = GEN_INT (0xff00);
+ xops[16] = GEN_INT (0xff0000);
+ xops[17] = GEN_INT (0xff000000);
+
+ /* Loop to check 1..3 bytes for null to get an aligned pointer. */
+
+ /* Is there a known alignment and is it less than 4? */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) < 4)
+ {
+ /* Is there a known alignment and is it not 2? */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 2)
+ {
+ xops[6] = gen_label_rtx (); /* Label when aligned to 3-byte */
+ xops[7] = gen_label_rtx (); /* Label when aligned to 2-byte */
+
+ /* Leave just the 3 lower bits.
+ If this is a q-register, then the high part is used later
+ therefore use andl rather than andb. */
+ output_asm_insn (AS2 (and%L1,%4,%1), xops);
+
+ /* Is aligned to 4-byte address when zero */
+ output_asm_insn (AS1 (je,%l8), xops);
+
+ /* Side-effect even Parity when %eax == 3 */
+ output_asm_insn (AS1 (jp,%6), xops);
+
+ /* Is it aligned to 2 bytes ? */
+ if (QI_REG_P (xops[1]))
+ output_asm_insn (AS2 (cmp%L1,%3,%1), xops);
+ else
+ output_asm_insn (AS2 (cmp%L1,%3,%1), xops);
+
+ output_asm_insn (AS1 (je,%7), xops);
+ }
+ else
+ {
+ /* Since the alignment is 2, we have to check 2 or 0 bytes;
+	     check whether it is aligned to a 4-byte address.  */
+ output_asm_insn (AS2 (and%L1,%3,%1), xops);
+
+ /* Is aligned to 4-byte address when zero */
+ output_asm_insn (AS1 (je,%l8), xops);
+ }
+
+ xops[13] = gen_rtx_MEM (QImode, xops[0]);
+
+ /* Now compare the bytes; compare with the high part of a q-reg
+ gives shorter code. */
+ if (QI_REG_P (xops[1]))
+ {
+	  /* Compare the first n unaligned bytes on a byte-by-byte basis.  */
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+
+ /* When zero we reached the end. */
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Increment the address. */
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ /* Not needed with an alignment of 2 */
+ if (GET_CODE (operands[1]) != CONST_INT || INTVAL (operands[1]) != 2)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[7]));
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[6]));
+ }
+
+ output_asm_insn (AS2 (cmp%B1,%h1,%13), xops);
+ }
+ else
+ {
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[7]));
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[6]));
+ output_asm_insn (AS2 (cmp%B13,%2,%13), xops);
+ }
+
+ output_asm_insn (AS1 (je,%l12), xops);
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+ }
+
+  /* Generate a loop to check 4 bytes at a time.  It is not a good idea
+     to align this loop; doing so only enlarges the program and does not
+     speed it up.  */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[8]));
+
+ xops[13] = gen_rtx_MEM (SImode, xops[0]);
+ output_asm_insn (AS2 (mov%L1,%13,%1), xops);
+
+ if (QI_REG_P (xops[1]))
+ {
+      /* On the i586 it is faster to combine the hi- and lo- parts as
+	 a kind of lookahead.  If anding both yields zero, then one
+	 of the two *could* be zero; otherwise neither of them is zero.
+	 This saves one instruction.  On the i486 this is slower
+	 (tested with P-90, i486DX2-66, AMD486DX2-66).  */
+ if (TARGET_PENTIUM)
+ {
+ output_asm_insn (AS2 (test%B1,%h1,%b1), xops);
+ output_asm_insn (AS1 (jne,%l9), xops);
+ }
+
+ /* Check first byte. */
+ output_asm_insn (AS2 (test%B1,%b1,%b1), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Check second byte. */
+ output_asm_insn (AS2 (test%B1,%h1,%h1), xops);
+ output_asm_insn (AS1 (je,%l11), xops);
+
+ if (TARGET_PENTIUM)
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L",
+ CODE_LABEL_NUMBER (xops[9]));
+ }
+
+ else
+ {
+ /* Check first byte. */
+ output_asm_insn (AS2 (test%L1,%14,%1), xops);
+ output_asm_insn (AS1 (je,%l12), xops);
+
+ /* Check second byte. */
+ output_asm_insn (AS2 (test%L1,%15,%1), xops);
+ output_asm_insn (AS1 (je,%l11), xops);
+ }
+
+ /* Check third byte. */
+ output_asm_insn (AS2 (test%L1,%16,%1), xops);
+ output_asm_insn (AS1 (je,%l10), xops);
+
+ /* Check fourth byte and increment address. */
+ output_asm_insn (AS2 (add%L0,%5,%0), xops);
+ output_asm_insn (AS2 (test%L1,%17,%1), xops);
+ output_asm_insn (AS1 (jne,%l8), xops);
+
+ /* Now generate fixups when the compare stops within a 4-byte word. */
+ output_asm_insn (AS2 (sub%L0,%4,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[10]));
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[11]));
+ output_asm_insn (AS1 (inc%L0,%0), xops);
+
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "L", CODE_LABEL_NUMBER (xops[12]));
+
+ return "";
+}
+
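The unrolled loop emitted above aligns the pointer, then fetches four bytes at a time and tests each lane with the 0xff, 0xff00, 0xff0000 and 0xff000000 masks until it finds the terminating null. The same word-at-a-time idea written as ordinary little-endian C, a sketch rather than the code the pattern actually emits; the alignment prologue is omitted and the buffer must be padded so the final word read stays in bounds:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Word-at-a-time strlen sketch: load a 32-bit word and test each byte
   lane with a mask, mirroring the test%L1 sequence above.  The masks
   assume little-endian byte order (byte 0 is the low byte).  */
static size_t
strlen_by_words (const char *s)
{
  size_t n = 0;
  for (;;)
    {
      uint32_t w;
      memcpy (&w, s + n, sizeof w);   /* aliasing- and alignment-safe load */
      if ((w & 0x000000ffu) == 0) return n;
      if ((w & 0x0000ff00u) == 0) return n + 1;
      if ((w & 0x00ff0000u) == 0) return n + 2;
      if ((w & 0xff000000u) == 0) return n + 3;
      n += 4;
    }
}

int
main (void)
{
  /* String literal plus zero padding so the over-read stays in bounds.  */
  static const char buf[8] = "hello";
  printf ("%zu\n", strlen_by_words (buf));   /* 5 */
  return 0;
}
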
+char *
+output_fp_conditional_move (which_alternative, operands)
+ int which_alternative;
+ rtx operands[];
+{
+ switch (which_alternative)
+ {
+ case 0:
+ /* r <- cond ? arg : r */
+ output_asm_insn (AS2 (fcmov%F1,%2,%0), operands);
+ break;
+
+ case 1:
+ /* r <- cond ? r : arg */
+ output_asm_insn (AS2 (fcmov%f1,%3,%0), operands);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
+
+char *
+output_int_conditional_move (which_alternative, operands)
+ int which_alternative;
+ rtx operands[];
+{
+ int code = GET_CODE (operands[1]);
+ enum machine_mode mode;
+ rtx xops[4];
+
+  /* This is very tricky.  We have to do it right.  For a code segment
+ like:
+
+ int foo, bar;
+ ....
+ foo = foo - x;
+ if (foo >= 0)
+ bar = y;
+
+ final_scan_insn () may delete the insn which sets CC. We have to
+ tell final_scan_insn () if it should be reinserted. When CODE is
+ GT or LE, we have to check the CC_NO_OVERFLOW bit and return
+ NULL_PTR to tell final to reinsert the test insn because the
+ conditional move cannot be handled properly without it. */
+ if ((code == GT || code == LE)
+ && (cc_prev_status.flags & CC_NO_OVERFLOW))
+ return NULL_PTR;
+
+ mode = GET_MODE (operands [0]);
+ if (mode == DImode)
+ {
+ xops [0] = gen_rtx_SUBREG (SImode, operands [0], 1);
+ xops [1] = operands [1];
+ xops [2] = gen_rtx_SUBREG (SImode, operands [2], 1);
+ xops [3] = gen_rtx_SUBREG (SImode, operands [3], 1);
+ }
+
+ switch (which_alternative)
+ {
+ case 0:
+ /* r <- cond ? arg : r */
+ output_asm_insn (AS2 (cmov%C1,%2,%0), operands);
+ if (mode == DImode)
+ output_asm_insn (AS2 (cmov%C1,%2,%0), xops);
+ break;
+
+ case 1:
+ /* r <- cond ? r : arg */
+ output_asm_insn (AS2 (cmov%c1,%3,%0), operands);
+ if (mode == DImode)
+ output_asm_insn (AS2 (cmov%c1,%3,%0), xops);
+ break;
+
+ default:
+ abort ();
+ }
+
+ return "";
+}
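+
output_int_conditional_move emits cmov, that is, a branch-free "r = cond ? arg : r", and handles DImode by issuing the same cmov twice over the SImode halves. A plain-C picture of that DImode splitting; the function name and types below are mine, not GCC's:

#include <stdio.h>
#include <stdint.h>

/* The DImode case above splits a 64-bit conditional move into two
   32-bit ones over the low and high halves; both halves use the same
   condition, just like the pair of cmov%C1 insns emitted there.  */
static uint64_t
cmov64_by_halves (int cond, uint64_t dst, uint64_t src)
{
  uint32_t lo = cond ? (uint32_t) src : (uint32_t) dst;
  uint32_t hi = cond ? (uint32_t) (src >> 32) : (uint32_t) (dst >> 32);
  return ((uint64_t) hi << 32) | lo;
}

int
main (void)
{
  printf ("%llx\n",
	  (unsigned long long) cmov64_by_halves (1, 0, 0x1122334455667788ULL));
  return 0;
}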
diff --git a/contrib/gcc/config/i386/i386.h b/contrib/gcc/config/i386/i386.h
index b00b0e5..b8fef11 100644
--- a/contrib/gcc/config/i386/i386.h
+++ b/contrib/gcc/config/i386/i386.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler for Intel X86
(386, 486, Pentium).
- Copyright (C) 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1988, 92, 94, 95, 96, 97, 1998 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -17,8 +17,7 @@ GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
+Boston, MA 02111-1307, USA. */
/* The purpose of this file is to define the characteristics of the i386,
independent of assembler syntax or operating system.
@@ -53,6 +52,20 @@ Boston, MA 02111-1307, USA. */
#define HALF_PIC_FINISH(STREAM)
#endif
+/* Define the specific costs for a given cpu */
+
+struct processor_costs {
+ int add; /* cost of an add instruction */
+ int lea; /* cost of a lea instruction */
+ int shift_var; /* variable shift costs */
+ int shift_const; /* constant shift costs */
+ int mult_init; /* cost of starting a multiply */
+ int mult_bit; /* cost of multiply per each bit set */
+ int divide; /* cost of a divide/mod */
+};
+
+extern struct processor_costs *ix86_cost;
+
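A per-CPU cost table like processor_costs lets the backend pick between equivalent instruction sequences at rtx-cost time. A toy, self-contained sketch of one such decision; the field subset, numbers and names below are made up for illustration and are not GCC's tables:

#include <stdio.h>

/* Mirror of a few fields of the struct above, illustrative values only.  */
struct cpu_costs
{
  int add;
  int lea;
  int shift_const;
  int mult_init;
  int mult_bit;
};

static const struct cpu_costs fast_lea = { 1, 1, 1, 11, 1 };
static const struct cpu_costs slow_lea = { 1, 4, 2, 12, 1 };

/* Prefer computing x*5 with lea (x,x,4) only when lea costs no more
   than the shift+add it replaces; a toy version of a cost query.  */
static int
use_lea_for_times5 (const struct cpu_costs *c)
{
  return c->lea <= c->shift_const + c->add;
}

int
main (void)
{
  printf ("%d %d\n", use_lea_for_times5 (&fast_lea),
	  use_lea_for_times5 (&slow_lea));   /* 1 0 */
  return 0;
}
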
/* Run-time compilation parameters selecting different hardware subsets. */
extern int target_flags;
@@ -66,20 +79,23 @@ extern int target_flags;
/* Masks for the -m switches */
#define MASK_80387 000000000001 /* Hardware floating point */
-#define MASK_486 000000000002 /* 80486 specific */
-#define MASK_NOTUSED1 000000000004 /* bit not currently used */
+#define MASK_NOTUSED1 000000000002 /* bit not currently used */
+#define MASK_NOTUSED2 000000000004 /* bit not currently used */
#define MASK_RTD 000000000010 /* Use ret that pops args */
#define MASK_ALIGN_DOUBLE 000000000020 /* align doubles to 2 word boundary */
#define MASK_SVR3_SHLIB 000000000040 /* Uninit locals into bss */
#define MASK_IEEE_FP 000000000100 /* IEEE fp comparisons */
#define MASK_FLOAT_RETURNS 000000000200 /* Return float in st(0) */
#define MASK_NO_FANCY_MATH_387 000000000400 /* Disable sin, cos, sqrt */
-
+#define MASK_OMIT_LEAF_FRAME_POINTER 0x00000800 /* omit leaf frame pointers */
/* Temporary codegen switches */
#define MASK_DEBUG_ADDR 000001000000 /* Debug GO_IF_LEGITIMATE_ADDRESS */
#define MASK_NO_WIDE_MULTIPLY 000002000000 /* Disable 32x32->64 multiplies */
#define MASK_NO_MOVE 000004000000 /* Don't generate mem->mem */
-#define MASK_DEBUG_ARG 000010000000 /* Debug function_arg */
+#define MASK_NO_PSEUDO 000010000000 /* Move op's args -> pseudos */
+#define MASK_DEBUG_ARG 000020000000 /* Debug function_arg */
+#define MASK_SCHEDULE_PROLOGUE 000040000000 /* Emit prologue as rtl */
+#define MASK_STACK_PROBE 000100000000 /* Enable stack probing */
/* Use the floating point instructions */
#define TARGET_80387 (target_flags & MASK_80387)
@@ -112,6 +128,9 @@ extern int target_flags;
This is because FreeBSD lacks these in the math-emulator-code */
#define TARGET_NO_FANCY_MATH_387 (target_flags & MASK_NO_FANCY_MATH_387)
+/* Don't create frame pointers for leaf functions */
+#define TARGET_OMIT_LEAF_FRAME_POINTER (target_flags & MASK_OMIT_LEAF_FRAME_POINTER)
+
/* Temporary switches for tuning code generation */
/* Disable 32x32->64 bit multiplies that are used for long long multiplies
@@ -119,6 +138,9 @@ extern int target_flags;
#define TARGET_NO_WIDE_MULTIPLY (target_flags & MASK_NO_WIDE_MULTIPLY)
#define TARGET_WIDE_MULTIPLY (!TARGET_NO_WIDE_MULTIPLY)
+/* Emit/Don't emit prologue as rtl */
+#define TARGET_SCHEDULE_PROLOGUE (target_flags & MASK_SCHEDULE_PROLOGUE)
+
/* Debug GO_IF_LEGITIMATE_ADDRESS */
#define TARGET_DEBUG_ADDR (target_flags & MASK_DEBUG_ADDR)
@@ -127,10 +149,25 @@ extern int target_flags;
/* Hack macros for tuning code generation */
#define TARGET_MOVE ((target_flags & MASK_NO_MOVE) == 0) /* Don't generate memory->memory */
-
-/* Specific hardware switches */
-#define TARGET_486 (target_flags & MASK_486) /* 80486DX, 80486SX, 80486DX[24] */
-#define TARGET_386 (!TARGET_486) /* 80386 */
+#define TARGET_PSEUDO ((target_flags & MASK_NO_PSEUDO) == 0) /* Move op's args into pseudos */
+
+#define TARGET_386 (ix86_cpu == PROCESSOR_I386)
+#define TARGET_486 (ix86_cpu == PROCESSOR_I486)
+#define TARGET_PENTIUM (ix86_cpu == PROCESSOR_PENTIUM)
+#define TARGET_PENTIUMPRO (ix86_cpu == PROCESSOR_PENTIUMPRO)
+#define TARGET_USE_LEAVE (ix86_cpu == PROCESSOR_I386)
+#define TARGET_PUSH_MEMORY (ix86_cpu == PROCESSOR_I386)
+#define TARGET_ZERO_EXTEND_WITH_AND (ix86_cpu != PROCESSOR_I386 \
+ && ix86_cpu != PROCESSOR_PENTIUMPRO)
+#define TARGET_DOUBLE_WITH_ADD (ix86_cpu != PROCESSOR_I386)
+#define TARGET_USE_BIT_TEST (ix86_cpu == PROCESSOR_I386)
+#define TARGET_UNROLL_STRLEN (ix86_cpu != PROCESSOR_I386)
+#define TARGET_USE_Q_REG (ix86_cpu == PROCESSOR_PENTIUM \
+ || ix86_cpu == PROCESSOR_PENTIUMPRO)
+#define TARGET_USE_ANY_REG (ix86_cpu == PROCESSOR_I486)
+#define TARGET_CMOVE (ix86_arch == PROCESSOR_PENTIUMPRO)
+#define TARGET_DEEP_BRANCH_PREDICTION (ix86_cpu == PROCESSOR_PENTIUMPRO)
+#define TARGET_STACK_PROBE (target_flags & MASK_STACK_PROBE)
#define TARGET_SWITCHES \
{ { "80387", MASK_80387 }, \
@@ -138,10 +175,12 @@ extern int target_flags;
{ "hard-float", MASK_80387 }, \
{ "soft-float", -MASK_80387 }, \
{ "no-soft-float", MASK_80387 }, \
- { "386", -MASK_486 }, \
- { "no-386", MASK_486 }, \
- { "486", MASK_486 }, \
- { "no-486", -MASK_486 }, \
+ { "386", 0 }, \
+ { "no-386", 0 }, \
+ { "486", 0 }, \
+ { "no-486", 0 }, \
+ { "pentium", 0 }, \
+ { "pentiumpro", 0 }, \
{ "rtd", MASK_RTD }, \
{ "no-rtd", -MASK_RTD }, \
{ "align-double", MASK_ALIGN_DOUBLE }, \
@@ -154,16 +193,62 @@ extern int target_flags;
{ "no-fp-ret-in-387", -MASK_FLOAT_RETURNS }, \
{ "no-fancy-math-387", MASK_NO_FANCY_MATH_387 }, \
{ "fancy-math-387", -MASK_NO_FANCY_MATH_387 }, \
+ { "omit-leaf-frame-pointer", MASK_OMIT_LEAF_FRAME_POINTER }, \
+ { "no-omit-leaf-frame-pointer",-MASK_OMIT_LEAF_FRAME_POINTER }, \
{ "no-wide-multiply", MASK_NO_WIDE_MULTIPLY }, \
{ "wide-multiply", -MASK_NO_WIDE_MULTIPLY }, \
+ { "schedule-prologue", MASK_SCHEDULE_PROLOGUE }, \
+ { "no-schedule-prologue", -MASK_SCHEDULE_PROLOGUE }, \
{ "debug-addr", MASK_DEBUG_ADDR }, \
{ "no-debug-addr", -MASK_DEBUG_ADDR }, \
{ "move", -MASK_NO_MOVE }, \
{ "no-move", MASK_NO_MOVE }, \
{ "debug-arg", MASK_DEBUG_ARG }, \
{ "no-debug-arg", -MASK_DEBUG_ARG }, \
+ { "stack-arg-probe", MASK_STACK_PROBE }, \
+ { "no-stack-arg-probe", -MASK_STACK_PROBE }, \
+ { "windows", 0 }, \
+ { "dll", 0 }, \
SUBTARGET_SWITCHES \
- { "", TARGET_DEFAULT | TARGET_CPU_DEFAULT}}
+ { "", MASK_SCHEDULE_PROLOGUE | TARGET_DEFAULT}}
+
+/* Which processor to schedule for. The cpu attribute defines a list that
+ mirrors this list, so changes to i386.md must be made at the same time. */
+
+enum processor_type
+ {PROCESSOR_I386, /* 80386 */
+ PROCESSOR_I486, /* 80486DX, 80486SX, 80486DX[24] */
+ PROCESSOR_PENTIUM,
+ PROCESSOR_PENTIUMPRO};
+
+#define PROCESSOR_I386_STRING "i386"
+#define PROCESSOR_I486_STRING "i486"
+#define PROCESSOR_I586_STRING "i586"
+#define PROCESSOR_PENTIUM_STRING "pentium"
+#define PROCESSOR_I686_STRING "i686"
+#define PROCESSOR_PENTIUMPRO_STRING "pentiumpro"
+
+extern enum processor_type ix86_cpu;
+
+extern int ix86_arch;
+
+/* Define the default processor. This is overridden by other tm.h files. */
+#define PROCESSOR_DEFAULT \
+ ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_I486) \
+ ? PROCESSOR_I486 \
+ : ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_PENTIUM) \
+ ? PROCESSOR_PENTIUM \
+ : ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_PENTIUMPRO) \
+ ? PROCESSOR_PENTIUMPRO \
+ : PROCESSOR_I386
+#define PROCESSOR_DEFAULT_STRING \
+ ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_I486) \
+ ? PROCESSOR_I486_STRING \
+ : ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_PENTIUM) \
+ ? PROCESSOR_PENTIUM_STRING \
+ : ((enum processor_type) TARGET_CPU_DEFAULT == PROCESSOR_PENTIUMPRO) \
+ ? PROCESSOR_PENTIUMPRO_STRING \
+ : PROCESSOR_I386_STRING
/* This macro is similar to `TARGET_SWITCHES' but defines names of
command options that have values. Its definition is an
@@ -175,11 +260,14 @@ extern int target_flags;
option if the fixed part matches. The actual option name is made
by appending `-m' to the specified name. */
#define TARGET_OPTIONS \
-{ { "reg-alloc=", &i386_reg_alloc_order }, \
+{ { "cpu=", &ix86_cpu_string}, \
+ { "arch=", &ix86_arch_string}, \
+ { "reg-alloc=", &i386_reg_alloc_order }, \
{ "regparm=", &i386_regparm_string }, \
{ "align-loops=", &i386_align_loops_string }, \
{ "align-jumps=", &i386_align_jumps_string }, \
{ "align-functions=", &i386_align_funcs_string }, \
+ { "branch-cost=", &i386_branch_cost_string }, \
SUBTARGET_OPTIONS \
}
@@ -198,6 +286,82 @@ extern int target_flags;
#define SUBTARGET_SWITCHES
#define SUBTARGET_OPTIONS
+/* Define this to change the optimizations performed by default. */
+#define OPTIMIZATION_OPTIONS(LEVEL,SIZE) optimization_options(LEVEL,SIZE)
+
+/* Specs for the compiler proper */
+
+#ifndef CC1_CPU_SPEC
+#define CC1_CPU_SPEC "\
+%{!mcpu*: \
+%{m386:-mcpu=i386 -march=i386} \
+%{mno-486:-mcpu=i386 -march=i386} \
+%{m486:-mcpu=i486 -march=i486} \
+%{mno-386:-mcpu=i486 -march=i486} \
+%{mno-pentium:-mcpu=i486 -march=i486} \
+%{mpentium:-mcpu=pentium} \
+%{mno-pentiumpro:-mcpu=pentium} \
+%{mpentiumpro:-mcpu=pentiumpro}}"
+#endif
+
+#define CPP_486_SPEC "%{!ansi:-Di486} -D__i486 -D__i486__"
+#define CPP_586_SPEC "%{!ansi:-Di586 -Dpentium} \
+ -D__i586 -D__i586__ -D__pentium -D__pentium__"
+#define CPP_686_SPEC "%{!ansi:-Di686 -Dpentiumpro} \
+ -D__i686 -D__i686__ -D__pentiumpro -D__pentiumpro__"
+
+#ifndef CPP_CPU_DEFAULT_SPEC
+#if TARGET_CPU_DEFAULT == 1
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_486)"
+#else
+#if TARGET_CPU_DEFAULT == 2
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_586)"
+#else
+#if TARGET_CPU_DEFAULT == 3
+#define CPP_CPU_DEFAULT_SPEC "%(cpp_686)"
+#else
+#define CPP_CPU_DEFAULT_SPEC ""
+#endif
+#endif
+#endif
+#endif /* CPP_CPU_DEFAULT_SPEC */
+
+#ifndef CPP_CPU_SPEC
+#define CPP_CPU_SPEC "\
+-Asystem(unix) -Acpu(i386) -Amachine(i386) \
+%{!ansi:-Di386} -D__i386 -D__i386__ \
+%{mcpu=i486:%(cpp_486)} %{m486:%(cpp_486)} \
+%{mpentium:%(cpp_586)} %{mcpu=pentium:%(cpp_586)} \
+%{mpentiumpro:%(cpp_686)} %{mcpu=pentiumpro:%(cpp_686)} \
+%{!mcpu*:%{!m486:%{!mpentium*:%(cpp_cpu_default)}}}"
+#endif
+
+#ifndef CC1_SPEC
+#define CC1_SPEC "%(cc1_spec) "
+#endif
+
+/* This macro defines names of additional specifications to put in the
+ specs that can be used in various specifications like CC1_SPEC. Its
+ definition is an initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant that defines the
+ specification name, and a string constant that is used by the GNU CC
+ driver program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#ifndef SUBTARGET_EXTRA_SPECS
+#define SUBTARGET_EXTRA_SPECS
+#endif
+
+#define EXTRA_SPECS \
+ { "cpp_486", CPP_486_SPEC}, \
+ { "cpp_586", CPP_586_SPEC}, \
+ { "cpp_686", CPP_686_SPEC}, \
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC }, \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cc1_cpu", CC1_CPU_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
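To make the spec plumbing above concrete: -mcpu=pentium (or plain -mpentium) selects %(cpp_586), so a translation unit sees the Pentium predefines. The fragment below is an editorial illustration derived from the spec strings themselves, not from any particular installed driver.

/* With CPP_CPU_SPEC above, -mcpu=pentium pulls in CPP_586_SPEC, so
   __i586__ and __pentium__ are predefined (and -Di586/-Dpentium as
   well unless -ansi is given).  */
#if defined (__i586__) && defined (__pentium__)
const int built_for_pentium_or_better = 1;
#else
const int built_for_pentium_or_better = 0;
#endif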
/* target machine storage layout */
@@ -265,6 +429,79 @@ extern int target_flags;
aligned on 64 bit boundaries. */
#define BIGGEST_ALIGNMENT (TARGET_ALIGN_DOUBLE ? 64 : 32)
+/* If defined, a C expression to compute the alignment given to a
+ constant that is being placed in memory. CONSTANT is the constant
+ and ALIGN is the alignment that the object would ordinarily have.
+ The value of this macro is used instead of that alignment to align
+ the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ The typical use of this macro is to increase alignment for string
+ constants to be word aligned so that `strcpy' calls that copy
+ constants can be done inline. */
+
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ (TREE_CODE (EXP) == REAL_CST \
+ ? ((TYPE_MODE (TREE_TYPE (EXP)) == DFmode && (ALIGN) < 64) \
+ ? 64 \
+ : (TYPE_MODE (TREE_TYPE (EXP)) == XFmode && (ALIGN) < 128) \
+ ? 128 \
+ : (ALIGN)) \
+ : TREE_CODE (EXP) == STRING_CST \
+ ? ((TREE_STRING_LENGTH (EXP) >= 31 && (ALIGN) < 256) \
+ ? 256 \
+ : (ALIGN)) \
+ : (ALIGN))
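A short editorial illustration of what the expression above does in the common cases; the bit values quoted are simply what the macro returns when the ordinary alignment is smaller, and the final alignment in the object file also depends on the assembler and object format.

/* Effects of CONSTANT_ALIGNMENT above (values in bits):  */
const char *greeting = "a string constant longer than thirty chars";
        /* STRING_CST with TREE_STRING_LENGTH >= 31: 256 (32 bytes)   */

double
scale (double x)
{
  return x * 1.2345678901234567;   /* DFmode REAL_CST in the pool: 64  */
}

long double
xscale (long double x)
{
  return x * 1.2345678901234567L;  /* XFmode REAL_CST in the pool: 128 */
}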
+
+/* If defined, a C expression to compute the alignment for a static
+ variable. TYPE is the data type, and ALIGN is the alignment that
+ the object would ordinarily have. The value of this macro is used
+ instead of that alignment to align the object.
+
+ If this macro is not defined, then ALIGN is used.
+
+ One use of this macro is to increase alignment of medium-size
+ data to make it all fit in fewer cache lines. Another is to
+ cause character arrays to be word-aligned so that `strcpy' calls
+ that copy constants to character arrays can be done inline. */
+
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ ((AGGREGATE_TYPE_P (TYPE) \
+ && TYPE_SIZE (TYPE) \
+ && TREE_CODE (TYPE_SIZE (TYPE)) == INTEGER_CST \
+ && (TREE_INT_CST_LOW (TYPE_SIZE (TYPE)) >= 256 \
+ || TREE_INT_CST_HIGH (TYPE_SIZE (TYPE))) && (ALIGN) < 256) \
+ ? 256 \
+ : TREE_CODE (TYPE) == ARRAY_TYPE \
+ ? ((TYPE_MODE (TREE_TYPE (TYPE)) == DFmode && (ALIGN) < 64) \
+ ? 64 \
+ : (TYPE_MODE (TREE_TYPE (TYPE)) == XFmode && (ALIGN) < 128) \
+ ? 128 \
+ : (ALIGN)) \
+ : TREE_CODE (TYPE) == COMPLEX_TYPE \
+ ? ((TYPE_MODE (TYPE) == DCmode && (ALIGN) < 64) \
+ ? 64 \
+ : (TYPE_MODE (TYPE) == XCmode && (ALIGN) < 128) \
+ ? 128 \
+ : (ALIGN)) \
+ : ((TREE_CODE (TYPE) == RECORD_TYPE \
+ || TREE_CODE (TYPE) == UNION_TYPE \
+ || TREE_CODE (TYPE) == QUAL_UNION_TYPE) \
+ && TYPE_FIELDS (TYPE)) \
+ ? ((DECL_MODE (TYPE_FIELDS (TYPE)) == DFmode && (ALIGN) < 64) \
+ ? 64 \
+ : (DECL_MODE (TYPE_FIELDS (TYPE)) == XFmode && (ALIGN) < 128) \
+ ? 128 \
+ : (ALIGN)) \
+ : TREE_CODE (TYPE) == REAL_TYPE \
+ ? ((TYPE_MODE (TYPE) == DFmode && (ALIGN) < 64) \
+ ? 64 \
+ : (TYPE_MODE (TYPE) == XFmode && (ALIGN) < 128) \
+ ? 128 \
+ : (ALIGN)) \
+ : (ALIGN))
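Again as an editorial illustration, assuming the declarations below would otherwise get their natural alignment:

/* Effects of DATA_ALIGNMENT above on static data (bits returned by
   the macro when larger than the ordinary alignment):  */
static char message[40];        /* aggregate of 320 bits (>= 256): 256 */
static double pair[2];          /* small array of DFmode elements: 64  */
static __complex__ double z;    /* DCmode complex value:           64  */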
+
/* Set this non-zero if move instructions will actually fail to work
when given unaligned data. */
#define STRICT_ALIGNMENT 0
@@ -278,12 +515,14 @@ extern int target_flags;
#define MAX_CODE_ALIGN 6 /* 64 byte alignment */
/* Align loop starts for optimal branching. */
-#define ASM_OUTPUT_LOOP_ALIGN(FILE) ASM_OUTPUT_ALIGN (FILE, i386_align_loops)
+#define LOOP_ALIGN(LABEL) (i386_align_loops)
+#define LOOP_ALIGN_MAX_SKIP (i386_align_loops_string ? 0 : 7)
/* This is how to align an instruction for optimal branching.
On i486 we'll get better performance by aligning on a
cache line (i.e. 16 byte) boundary. */
-#define ASM_OUTPUT_ALIGN_CODE(FILE) ASM_OUTPUT_ALIGN ((FILE), i386_align_jumps)
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) (i386_align_jumps)
+#define LABEL_ALIGN_AFTER_BARRIER_MAX_SKIP (i386_align_jumps_string ? 0 : 7)
/* Standard register usage. */
@@ -292,6 +531,7 @@ extern int target_flags;
for details. */
#define STACK_REGS
+#define IS_STACK_MODE(mode) (mode==DFmode || mode==SFmode || mode==XFmode)
/* Number of actual hardware registers.
The hardware registers are assigned numbers for the compiler
@@ -409,31 +649,23 @@ extern int target_flags;
for cross-compiler testing. */
#define HARD_REGNO_MODE_OK(REGNO, MODE) \
- ((REGNO) < 2 ? 1 \
- : (REGNO) < 4 ? 1 \
+ ((REGNO) < 4 ? 1 \
: FP_REGNO_P (REGNO) \
? (((int) GET_MODE_CLASS (MODE) == (int) MODE_FLOAT \
|| (int) GET_MODE_CLASS (MODE) == (int) MODE_COMPLEX_FLOAT) \
- && GET_MODE_UNIT_SIZE (MODE) <= 12) \
- : (int) (MODE) != (int) QImode)
+ && GET_MODE_UNIT_SIZE (MODE) <= (LONG_DOUBLE_TYPE_SIZE == 96 ? 12 : 8))\
+ : (int) (MODE) != (int) QImode ? 1 \
+ : (reload_in_progress | reload_completed) == 1)
/* Value is 1 if it is a good idea to tie two pseudo registers
when one has mode MODE1 and one has mode MODE2.
If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
for any hard reg, then this must be 0 for correct output. */
-#define MODES_TIEABLE_P(MODE1, MODE2) ((MODE1) == (MODE2))
-
-/* A C expression returning the cost of moving data from a register of class
- CLASS1 to one of CLASS2.
-
- On the i386, copying between floating-point and fixed-point
- registers is expensive. */
-
-#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
- (((FLOAT_CLASS_P (CLASS1) && ! FLOAT_CLASS_P (CLASS2)) \
- || (! FLOAT_CLASS_P (CLASS1) && FLOAT_CLASS_P (CLASS2))) ? 10 \
- : 2)
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) \
+ || ((MODE1) == SImode && (MODE2) == HImode \
+ || (MODE1) == HImode && (MODE2) == SImode))
/* Specify the registers used for certain standard purposes.
The values of these macros are register numbers. */
@@ -459,7 +691,7 @@ extern int target_flags;
Zero means the frame pointer need not be set up (and parms
may be accessed via the stack pointer) in functions that seem suitable.
This is computed in `reload', in reload1.c. */
-#define FRAME_POINTER_REQUIRED 0
+#define FRAME_POINTER_REQUIRED (TARGET_OMIT_LEAF_FRAME_POINTER && !leaf_function_p ())
/* Base register for access to arguments of the function. */
#define ARG_POINTER_REGNUM 16
@@ -562,16 +794,16 @@ enum reg_class
of length N_REG_CLASSES. */
#define REG_CLASS_CONTENTS \
-{ 0, \
- 0x1, 0x2, 0x4, 0x8, /* AREG, DREG, CREG, BREG */ \
- 0x3, /* AD_REGS */ \
- 0xf, /* Q_REGS */ \
- 0x10, 0x20, /* SIREG, DIREG */ \
- 0x07f, /* INDEX_REGS */ \
- 0x100ff, /* GENERAL_REGS */ \
- 0x0100, 0x0200, /* FP_TOP_REG, FP_SECOND_REG */ \
- 0xff00, /* FLOAT_REGS */ \
- 0x1ffff }
+{ {0}, \
+ {0x1}, {0x2}, {0x4}, {0x8}, /* AREG, DREG, CREG, BREG */ \
+ {0x3}, /* AD_REGS */ \
+ {0xf}, /* Q_REGS */ \
+ {0x10}, {0x20}, /* SIREG, DIREG */ \
+ {0x7f}, /* INDEX_REGS */ \
+ {0x100ff}, /* GENERAL_REGS */ \
+ {0x0100}, {0x0200}, /* FP_TOP_REG, FP_SECOND_REG */ \
+ {0xff00}, /* FLOAT_REGS */ \
+ {0x1ffff}}
/* The same information, inverted:
Return the class number of the smallest class containing
@@ -584,7 +816,7 @@ enum reg_class
rtl to be used as spill registers but prevents the compiler from
extending the lifetime of these registers. */
-#define SMALL_REGISTER_CLASSES
+#define SMALL_REGISTER_CLASSES 1
#define QI_REG_P(X) \
(REG_P (X) && REGNO (X) < 4)
@@ -658,15 +890,25 @@ enum reg_class
(C) == 'L' ? (VALUE) == 0xffff : \
(C) == 'M' ? (VALUE) >= 0 && (VALUE) <= 3 : \
(C) == 'N' ? (VALUE) >= 0 && (VALUE) <= 255 :\
+ (C) == 'O' ? (VALUE) >= 0 && (VALUE) <= 32 : \
0)
/* Similar, but for floating constants, and defining letters G and H.
Here VALUE is the CONST_DOUBLE rtx itself. We allow constants even if
TARGET_387 isn't set, because the stack register converter may need to
- load 0.0 into the function value register. */
+ load 0.0 into the function value register.
+
+ We disallow these constants when -fomit-frame-pointer and compiling
+ PIC code since reload might need to force the constant to memory.
+ Forcing the constant to memory changes the elimination offsets after
+ the point where they must stay constant.
+
+ However, we must allow them after reload has completed, as reg-stack.c
+ will create insns which use these constants. */
#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
- ((C) == 'G' ? standard_80387_constant_p (VALUE) : 0)
+ (((reload_completed || !flag_pic || !flag_omit_frame_pointer) && (C) == 'G') \
+ ? standard_80387_constant_p (VALUE) : 0)
/* Place additional restrictions on the register class to use when it
is necessary to be able to hold a value of mode MODE in a reload
@@ -791,14 +1033,14 @@ enum reg_class
If the precise function being called is known, FUNC is its FUNCTION_DECL;
otherwise, FUNC is 0. */
#define FUNCTION_VALUE(VALTYPE, FUNC) \
- gen_rtx (REG, TYPE_MODE (VALTYPE), \
+ gen_rtx_REG (TYPE_MODE (VALTYPE), \
VALUE_REGNO (TYPE_MODE (VALTYPE)))
/* Define how to find the value returned by a library function
assuming the value has mode MODE. */
#define LIBCALL_VALUE(MODE) \
- gen_rtx (REG, MODE, VALUE_REGNO (MODE))
+ gen_rtx_REG (MODE, VALUE_REGNO (MODE))
/* Define the size of the result block used for communication between
untyped_call and untyped_return. The block contains a DImode value
@@ -825,7 +1067,7 @@ typedef struct i386_args {
for a call to a function whose data type is FNTYPE.
For a library call, FNTYPE is 0. */
-#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME) \
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,INDIRECT) \
(init_cumulative_args (&CUM, FNTYPE, LIBNAME))
/* Update the data in CUM to advance over an argument
@@ -858,6 +1100,14 @@ typedef struct i386_args {
#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
(function_arg_partial_nregs (&CUM, MODE, TYPE, NAMED))
+/* This macro is invoked just before the start of a function.
+ It is used here to output code for -fpic that will load the
+ return address into %ebx. */
+
+#undef ASM_OUTPUT_FUNCTION_PREFIX
+#define ASM_OUTPUT_FUNCTION_PREFIX(FILE, FNNAME) \
+ asm_output_function_prefix (FILE, FNNAME)
+
/* This macro generates the assembly code for function entry.
FILE is a stdio stream to output the code to.
SIZE is an int: how many units of temporary storage to allocate.
@@ -887,27 +1137,94 @@ typedef struct i386_args {
} \
}
-/* A C statement or compound statement to output to FILE some
- assembler code to initialize basic-block profiling for the current
- object module. This code should call the subroutine
- `__bb_init_func' once per object module, passing it as its sole
- argument the address of a block allocated in the object module.
- The name of the block is a local symbol made with this statement:
+/* There are three profiling modes for basic blocks available.
+ The modes are selected at compile time by using the options
+ -a or -ax of the gnu compiler.
+ The variable `profile_block_flag' will be set according to the
+ selected option.
+
+ profile_block_flag == 0, no option used:
+
+ No profiling done.
+
+ profile_block_flag == 1, -a option used.
+
+ Count frequency of execution of every basic block.
+
+ profile_block_flag == 2, -ax option used.
+
+ Generate code to allow several different profiling modes at run time.
+ Available modes are:
+ Produce a trace of all basic blocks.
+ Count frequency of jump instructions executed.
+ In every mode it is possible to start profiling upon entering
+ certain functions and to disable profiling of some other functions.
+
+ The result of basic-block profiling will be written to a file `bb.out'.
+ If the -ax option is used, parameters for the profiling will be read
+ from file `bb.in'.
+
+*/
+
+/* The following macro shall output assembler code to FILE
+ to initialize basic-block profiling.
+
+ If profile_block_flag == 2
+
+ Output code to call the subroutine `__bb_init_trace_func'
+ and pass two parameters to it. The first parameter is
+ the address of a block allocated in the object module.
+ The second parameter is the number of the first basic block
+ of the function.
+
+ The name of the block is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ The number of the first basic block of the function is
+ passed to the macro in BLOCK_OR_LABEL.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ parameter1 <- LPBX0
+ parameter2 <- BLOCK_OR_LABEL
+ call __bb_init_trace_func
+
+ else if profile_block_flag != 0
+
+ Output code to call the subroutine `__bb_init_func'
+ and pass one single parameter to it, which is the same
+ as the first parameter to `__bb_init_trace_func'.
- ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+ The first word of this parameter is a flag which will be nonzero if
+ the object module has already been initialized. So test this word
+ first, and do not call `__bb_init_func' if the flag is nonzero.
+ Note: When profile_block_flag == 2 the test need not be done
+ but `__bb_init_trace_func' *must* be called.
- Of course, since you are writing the definition of
- `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
- can take a short cut in the definition of this macro and use the
- name that you know will result.
+ BLOCK_OR_LABEL may be used to generate a label number as a
+ branch destination in case `__bb_init_func' will not be called.
- The first word of this block is a flag which will be nonzero if the
- object module has already been initialized. So test this word
- first, and do not call `__bb_init_func' if the flag is nonzero. */
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ cmp (LPBX0),0
+ jne local_label
+ parameter1 <- LPBX0
+ call __bb_init_func
+local_label:
+
+*/
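A C-level restatement of the flag test described above may help when reading the assembler sequence emitted by the macro. This is purely illustrative: the real block is the compiler-generated LPBX0 symbol and the exact prototype of __bb_init_func lives in libgcc, so both are approximated here.

/* Illustration only: the profile_block_flag != 2 path described above,
   written as C.  fake_LPBX0 stands in for the compiler-generated
   per-module block; its first word is the already-initialized flag.  */
extern void __bb_init_func (void *block);       /* approximate prototype */

static long fake_LPBX0[4];

static void
init_block_profiling_once (void)
{
  if (fake_LPBX0[0] == 0)
    __bb_init_func (fake_LPBX0);
}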
#undef FUNCTION_BLOCK_PROFILER
-#define FUNCTION_BLOCK_PROFILER(STREAM, LABELNO) \
+#define FUNCTION_BLOCK_PROFILER(FILE, BLOCK_OR_LABEL) \
do \
{ \
static int num_func = 0; \
@@ -915,74 +1232,264 @@ do \
char block_table[80], false_label[80]; \
\
ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0); \
- ASM_GENERATE_INTERNAL_LABEL (false_label, "LPBZ", num_func); \
\
- xops[0] = const0_rtx; \
- xops[1] = gen_rtx (SYMBOL_REF, VOIDmode, block_table); \
- xops[2] = gen_rtx (MEM, Pmode, gen_rtx (SYMBOL_REF, VOIDmode, false_label)); \
- xops[3] = gen_rtx (MEM, Pmode, gen_rtx (SYMBOL_REF, VOIDmode, "__bb_init_func")); \
- xops[4] = gen_rtx (MEM, Pmode, xops[1]); \
+ xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table); \
xops[5] = stack_pointer_rtx; \
- xops[6] = GEN_INT (4); \
- xops[7] = gen_rtx (REG, Pmode, 0); /* eax */ \
+ xops[7] = gen_rtx_REG (Pmode, 0); /* eax */ \
\
CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE; \
- CONSTANT_POOL_ADDRESS_P (xops[2]) = TRUE; \
- \
- output_asm_insn (AS2(cmp%L4,%0,%4), xops); \
- output_asm_insn (AS1(jne,%2), xops); \
\
- if (!flag_pic) \
- output_asm_insn (AS1(push%L1,%1), xops); \
- else \
+ switch (profile_block_flag) \
{ \
- output_asm_insn (AS2 (lea%L7,%a1,%7), xops); \
- output_asm_insn (AS1 (push%L7,%7), xops); \
- } \
\
- output_asm_insn (AS1(call,%P3), xops); \
- output_asm_insn (AS2(add%L0,%6,%5), xops); \
- ASM_OUTPUT_INTERNAL_LABEL (STREAM, "LPBZ", num_func); \
- num_func++; \
+ case 2: \
+ \
+ xops[2] = GEN_INT ((BLOCK_OR_LABEL)); \
+ xops[3] = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (VOIDmode, "__bb_init_trace_func")); \
+ xops[6] = GEN_INT (8); \
+ \
+ output_asm_insn (AS1(push%L2,%2), xops); \
+ if (!flag_pic) \
+ output_asm_insn (AS1(push%L1,%1), xops); \
+ else \
+ { \
+ output_asm_insn (AS2 (lea%L7,%a1,%7), xops); \
+ output_asm_insn (AS1 (push%L7,%7), xops); \
+ } \
+ \
+ output_asm_insn (AS1(call,%P3), xops); \
+ output_asm_insn (AS2(add%L0,%6,%5), xops); \
+ \
+ break; \
+ \
+ default: \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (false_label, "LPBZ", num_func); \
+ \
+ xops[0] = const0_rtx; \
+ xops[2] = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (VOIDmode, false_label)); \
+ xops[3] = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (VOIDmode, "__bb_init_func")); \
+ xops[4] = gen_rtx_MEM (Pmode, xops[1]); \
+ xops[6] = GEN_INT (4); \
+ \
+ CONSTANT_POOL_ADDRESS_P (xops[2]) = TRUE; \
+ \
+ output_asm_insn (AS2(cmp%L4,%0,%4), xops); \
+ output_asm_insn (AS1(jne,%2), xops); \
+ \
+ if (!flag_pic) \
+ output_asm_insn (AS1(push%L1,%1), xops); \
+ else \
+ { \
+ output_asm_insn (AS2 (lea%L7,%a1,%7), xops); \
+ output_asm_insn (AS1 (push%L7,%7), xops); \
+ } \
+ \
+ output_asm_insn (AS1(call,%P3), xops); \
+ output_asm_insn (AS2(add%L0,%6,%5), xops); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "LPBZ", num_func); \
+ num_func++; \
+ \
+ break; \
+ \
+ } \
} \
while (0)
+/* The following macro shall output assembler code to FILE
+ to increment a counter associated with basic block number BLOCKNO.
+
+ If profile_block_flag == 2
+
+ Output code to initialize the global structure `__bb' and
+ call the function `__bb_trace_func' which will increment the
+ counter.
-/* A C statement or compound statement to increment the count
- associated with the basic block number BLOCKNO. Basic blocks are
- numbered separately from zero within each compilation. The count
- associated with block number BLOCKNO is at index BLOCKNO in a
- vector of words; the name of this array is a local symbol made
- with this statement:
+ `__bb' consists of two words. In the first word the number
+ of the basic block has to be stored. In the second word
+ the address of a block allocated in the object module
+ has to be stored.
- ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
+ The basic block number is given by BLOCKNO.
- Of course, since you are writing the definition of
- `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
- can take a short cut in the definition of this macro and use the
- name that you know will result. */
+ The address of the block is given by the label created with
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ by FUNCTION_BLOCK_PROFILER.
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ move BLOCKNO -> (__bb)
+ move LPBX0 -> (__bb+4)
+ call __bb_trace_func
+
+ Note that function `__bb_trace_func' must not change the
+ machine state, especially the flag register. To guarantee
+ this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE
+ and MACHINE_STATE_RESTORE. The last two macros will be
+ used in the function `__bb_trace_func', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE.
+
+ else if profile_block_flag != 0
+
+ Output code to increment the counter directly.
+ Basic blocks are numbered separately from zero within each
+ compiled object module. The count associated with block number
+ BLOCKNO is at index BLOCKNO in an array of words; the name of
+ this array is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ inc (LPBX2+4*BLOCKNO)
+
+*/
-#define BLOCK_PROFILER(STREAM, BLOCKNO) \
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
do \
{ \
- rtx xops[1], cnt_rtx; \
+ rtx xops[8], cnt_rtx; \
char counts[80]; \
+ char *block_table = counts; \
\
- ASM_GENERATE_INTERNAL_LABEL (counts, "LPBX", 2); \
- cnt_rtx = gen_rtx (SYMBOL_REF, VOIDmode, counts); \
- SYMBOL_REF_FLAG (cnt_rtx) = TRUE; \
+ switch (profile_block_flag) \
+ { \
\
- if (BLOCKNO) \
- cnt_rtx = plus_constant (cnt_rtx, (BLOCKNO)*4); \
+ case 2: \
\
- if (flag_pic) \
- cnt_rtx = gen_rtx (PLUS, Pmode, pic_offset_table_rtx, cnt_rtx); \
+ ASM_GENERATE_INTERNAL_LABEL (block_table, "LPBX", 0); \
+ \
+ xops[1] = gen_rtx_SYMBOL_REF (VOIDmode, block_table); \
+ xops[2] = GEN_INT ((BLOCKNO)); \
+ xops[3] = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (VOIDmode, "__bb_trace_func")); \
+ xops[4] = gen_rtx_SYMBOL_REF (VOIDmode, "__bb"); \
+ xops[5] = plus_constant (xops[4], 4); \
+ xops[0] = gen_rtx_MEM (SImode, xops[4]); \
+ xops[6] = gen_rtx_MEM (SImode, xops[5]); \
+ \
+ CONSTANT_POOL_ADDRESS_P (xops[1]) = TRUE; \
+ \
+ fprintf(FILE, "\tpushf\n"); \
+ output_asm_insn (AS2(mov%L0,%2,%0), xops); \
+ if (flag_pic) \
+ { \
+ xops[7] = gen_rtx_REG (Pmode, 0); /* eax */ \
+ output_asm_insn (AS1(push%L7,%7), xops); \
+ output_asm_insn (AS2(lea%L7,%a1,%7), xops); \
+ output_asm_insn (AS2(mov%L6,%7,%6), xops); \
+ output_asm_insn (AS1(pop%L7,%7), xops); \
+ } \
+ else \
+ output_asm_insn (AS2(mov%L6,%1,%6), xops); \
+ output_asm_insn (AS1(call,%P3), xops); \
+ fprintf(FILE, "\tpopf\n"); \
+ \
+ break; \
+ \
+ default: \
+ \
+ ASM_GENERATE_INTERNAL_LABEL (counts, "LPBX", 2); \
+ cnt_rtx = gen_rtx_SYMBOL_REF (VOIDmode, counts); \
+ SYMBOL_REF_FLAG (cnt_rtx) = TRUE; \
\
- xops[0] = gen_rtx (MEM, SImode, cnt_rtx); \
- output_asm_insn (AS1(inc%L0,%0), xops); \
+ if (BLOCKNO) \
+ cnt_rtx = plus_constant (cnt_rtx, (BLOCKNO)*4); \
+ \
+ if (flag_pic) \
+ cnt_rtx = gen_rtx_PLUS (Pmode, pic_offset_table_rtx, cnt_rtx); \
+ \
+ xops[0] = gen_rtx_MEM (SImode, cnt_rtx); \
+ output_asm_insn (AS1(inc%L0,%0), xops); \
+ \
+ break; \
+ \
+ } \
} \
while (0)
+/* The following macro shall output assembler code to FILE
+ to indicate a return from function during basic-block profiling.
+
+ If profile_block_flag == 2:
+
+ Output assembler code to call function `__bb_trace_ret'.
+
+ Note that function `__bb_trace_ret' must not change the
+ machine state, especially the flag register. To guarantee
+ this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE_RET
+ and MACHINE_STATE_RESTORE_RET. The last two macros will be
+ used in the function `__bb_trace_ret', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE_RET.
+
+ else if profile_block_flag != 0:
+
+ The macro will not be used, so it need not distinguish
+ these cases.
+*/
+
+#define FUNCTION_BLOCK_PROFILER_EXIT(FILE) \
+do \
+ { \
+ rtx xops[1]; \
+ \
+ xops[0] = gen_rtx_MEM (Pmode, gen_rtx_SYMBOL_REF (VOIDmode, "__bb_trace_ret")); \
+ \
+ output_asm_insn (AS1(call,%P0), xops); \
+ \
+ } \
+while (0)
+
+/* The function `__bb_trace_func' is called in every basic block
+ and is not allowed to change the machine state. Saving (restoring)
+ the state can either be done in the BLOCK_PROFILER macro,
+ before calling the function (resp. after returning from the function)
+ `__bb_trace_func', or it can be done inside the function by
+ defining the macros:
+
+ MACHINE_STATE_SAVE(ID)
+ MACHINE_STATE_RESTORE(ID)
+
+ In the latter case care must be taken that the prologue code
+ of function `__bb_trace_func' does not already change the
+ state prior to saving it with MACHINE_STATE_SAVE.
+
+ The parameter `ID' is a string identifying a unique macro use.
+
+ On the i386 the initialization code at the beginning of
+ function `__bb_trace_func' contains a `sub' instruction;
+ therefore we handle saving and restoring the flag register
+ in the BLOCK_PROFILER macro. */
+
+#define MACHINE_STATE_SAVE(ID) \
+ asm (" pushl %eax"); \
+ asm (" pushl %ecx"); \
+ asm (" pushl %edx"); \
+ asm (" pushl %esi");
+
+#define MACHINE_STATE_RESTORE(ID) \
+ asm (" popl %esi"); \
+ asm (" popl %edx"); \
+ asm (" popl %ecx"); \
+ asm (" popl %eax");
+
/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
the stack pointer does not matter. The value is tested only in
functions that have frame pointers.
@@ -1008,14 +1515,19 @@ while (0)
off the end. This happens if the function ends in an "exit" call, or
if a `return' insn is emitted directly into the function. */
-#define FUNCTION_EPILOGUE(FILE, SIZE) \
+#if 0
+#define FUNCTION_BEGIN_EPILOGUE(FILE) \
do { \
rtx last = get_last_insn (); \
if (last && GET_CODE (last) == NOTE) \
last = prev_nonnote_insn (last); \
- if (! last || GET_CODE (last) != BARRIER) \
- function_epilogue (FILE, SIZE); \
+/* if (! last || GET_CODE (last) != BARRIER) \
+ function_epilogue (FILE, SIZE);*/ \
} while (0)
+#endif
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+ function_epilogue (FILE, SIZE)
/* Output assembler code for a block containing the constant parts
of a trampoline, leaving space for the variable parts. */
@@ -1046,8 +1558,8 @@ do { \
#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
{ \
- emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 1)), CXT); \
- emit_move_insn (gen_rtx (MEM, SImode, plus_constant (TRAMP, 6)), FNADDR); \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 1)), CXT); \
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (TRAMP, 6)), FNADDR); \
}
/* Definitions for register eliminations.
@@ -1094,8 +1606,9 @@ do { \
\
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++) \
if ((regs_ever_live[regno] && ! call_used_regs[regno]) \
- || (current_function_uses_pic_offset_table \
- && regno == PIC_OFFSET_TABLE_REGNUM)) \
+ || ((current_function_uses_pic_offset_table \
+ || current_function_uses_const_pool) \
+ && flag_pic && regno == PIC_OFFSET_TABLE_REGNUM)) \
offset += 4; \
\
(OFFSET) = offset + get_frame_size (); \
@@ -1239,12 +1752,13 @@ do { \
#define LEGITIMIZE_ADDRESS(X, OLDX, MODE, WIN) \
{ \
- rtx orig_x = (X); \
(X) = legitimize_address (X, OLDX, MODE); \
if (memory_address_p (MODE, X)) \
goto WIN; \
}
+#define REWRITE_ADDRESS(x) rewrite_address(x)
+
/* Nonzero if the constant value X is a legitimate general operand
when generating PIC code. It is given that flag_pic is on and
that X satisfies CONSTANT_P or is a CONST_DOUBLE. */
@@ -1279,6 +1793,15 @@ do \
{ \
rtx rtl = (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \
? TREE_CST_RTL (DECL) : DECL_RTL (DECL)); \
+ \
+ if (TARGET_DEBUG_ADDR \
+ && TREE_CODE_CLASS (TREE_CODE (DECL)) == 'd') \
+ { \
+ fprintf (stderr, "Encode %s, public = %d\n", \
+ IDENTIFIER_POINTER (DECL_NAME (DECL)), \
+ TREE_PUBLIC (DECL)); \
+ } \
+ \
SYMBOL_REF_FLAG (XEXP (rtl, 0)) \
= (TREE_CODE_CLASS (TREE_CODE (DECL)) != 'd' \
|| ! TREE_PUBLIC (DECL)); \
@@ -1349,10 +1872,11 @@ while (0)
for the index in the tablejump instruction. */
#define CASE_VECTOR_MODE Pmode
-/* Define this if the tablejump instruction expects the table
- to contain offsets from the address of the table.
- Do not define this if the table should contain absolute addresses. */
-/* #define CASE_VECTOR_PC_RELATIVE */
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
/* Specify the tree operation to be used to convert reals to integers.
This should be changed to take advantage of fist --wfs ??
@@ -1369,22 +1893,23 @@ while (0)
in one reasonably fast instruction. */
#define MOVE_MAX 4
-/* MOVE_RATIO is the number of move instructions that is better than a
- block move. Make this large on i386, since the block move is very
- inefficient with small blocks, and the hard register needs of the
- block move require much reload work. */
-#define MOVE_RATIO 5
+/* The number of scalar move insns which should be generated instead
+ of a string move insn or a library call. Increasing the value
+ will always make code faster, but eventually incurs high cost in
+ increased code size.
-/* Define this if zero-extension is slow (more than one real instruction). */
-/* #define SLOW_ZERO_EXTEND */
+ If you don't define this, a reasonable default is used.
-/* Nonzero if access to memory by bytes is slow and undesirable. */
-#define SLOW_BYTE_ACCESS 0
+ Make this large on i386, since the block move is very inefficient with small
+ blocks, and the hard register needs of the block move require much reload
+ work. */
+
+#define MOVE_RATIO 5
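For illustration, the kind of copy this threshold is about (an editorial sketch; whether a given copy is really expanded inline also depends on alignment and on the generic move_by_pieces machinery):

struct four_words { int a, b, c, d; };

/* Four word moves, which is within MOVE_RATIO (5), so this copy is a
   candidate for inline scalar moves rather than a string move or a
   library call.  */
void
copy_four (struct four_words *dst, const struct four_words *src)
{
  *dst = *src;
}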
/* Define if shifts truncate the shift count
which implies one can omit a sign-extension or zero-extension
of a shift count. */
-/* One i386, shifts do truncate the count. But bit opcodes don't. */
+/* On i386, shifts do truncate the count. But bit opcodes don't. */
/* #define SHIFT_COUNT_TRUNCATED */
@@ -1411,70 +1936,194 @@ while (0)
is a byte address (for indexing purposes)
so give the MEM rtx a byte's mode. */
#define FUNCTION_MODE QImode
-
-/* Define this if addresses of constant functions
- shouldn't be put through pseudo regs where they can be cse'd.
- Desirable on the 386 because a CALL with a constant address is
- not much slower than one with a register address. On a 486,
- it is faster to call with a constant address than indirect. */
-#define NO_FUNCTION_CSE
-
-/* Provide the costs of a rtl expression. This is in the body of a
- switch on CODE. */
-
-#define RTX_COSTS(X,CODE,OUTER_CODE) \
- case MULT: \
- return COSTS_N_INSNS (20); \
- case DIV: \
- case UDIV: \
- case MOD: \
- case UMOD: \
- return COSTS_N_INSNS (20); \
- case ASHIFTRT: \
- case LSHIFTRT: \
- case ASHIFT: \
- return (4 + rtx_cost (XEXP (X, 0), OUTER_CODE) \
- + rtx_cost (XEXP (X, 1), OUTER_CODE)); \
- case PLUS: \
- if (GET_CODE (XEXP (X, 0)) == MULT \
- && GET_CODE (XEXP (XEXP (X, 0), 1)) == CONST_INT \
- && (INTVAL (XEXP (XEXP (X, 0), 1)) == 2 \
- || INTVAL (XEXP (XEXP (X, 0), 1)) == 4 \
- || INTVAL (XEXP (XEXP (X, 0), 1)) == 8)) \
- return (2 + rtx_cost (XEXP (XEXP (X, 0), 0), OUTER_CODE) \
- + rtx_cost (XEXP (X, 1), OUTER_CODE)); \
- break;
-
-
-/* Compute the cost of computing a constant rtl expression RTX
- whose rtx-code is CODE. The body of this macro is a portion
- of a switch statement. If the code is computed here,
- return it with a return statement. Otherwise, break from the switch. */
+
+/* A part of a C `switch' statement that describes the relative costs
+ of constant RTL expressions. It must contain `case' labels for
+ expression codes `const_int', `const', `symbol_ref', `label_ref'
+ and `const_double'. Each case must ultimately reach a `return'
+ statement to return the relative cost of the use of that kind of
+ constant value in an expression. The cost may depend on the
+ precise value of the constant, which is available for examination
+ in X, and the rtx code of the expression in which it is contained,
+ found in OUTER_CODE.
+
+ CODE is the expression code--redundant, since it can be obtained
+ with `GET_CODE (X)'. */
#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
case CONST_INT: \
+ return (unsigned) INTVAL (RTX) < 256 ? 0 : 1; \
case CONST: \
case LABEL_REF: \
case SYMBOL_REF: \
- return flag_pic && SYMBOLIC_CONST (RTX) ? 2 : 0; \
+ return flag_pic && SYMBOLIC_CONST (RTX) ? 2 : 1; \
+ \
case CONST_DOUBLE: \
{ \
int code; \
if (GET_MODE (RTX) == VOIDmode) \
return 2; \
+ \
code = standard_80387_constant_p (RTX); \
return code == 1 ? 0 : \
code == 2 ? 1 : \
2; \
}
-/* Compute the cost of an address. This is meant to approximate the size
- and/or execution delay of an insn using that address. If the cost is
- approximated by the RTL complexity, including CONST_COSTS above, as
- is usually the case for CISC machines, this macro should not be defined.
- For aggressively RISCy machines, only one insn format is allowed, so
- this macro should be a constant. The value of this macro only matters
- for valid addresses.
+/* Delete the definition here when TOPLEVEL_COSTS_N_INSNS gets added to cse.c */
+#define TOPLEVEL_COSTS_N_INSNS(N) {total = COSTS_N_INSNS (N); break;}
+
+/* Like `CONST_COSTS' but applies to nonconstant RTL expressions.
+ This can be used, for example, to indicate how costly a multiply
+ instruction is. In writing this macro, you can use the construct
+ `COSTS_N_INSNS (N)' to specify a cost equal to N fast
+ instructions. OUTER_CODE is the code of the expression in which X
+ is contained.
+
+ This macro is optional; do not define it if the default cost
+ assumptions are adequate for the target machine. */
+
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case ASHIFT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT \
+ && GET_MODE (XEXP (X, 0)) == SImode) \
+ { \
+ HOST_WIDE_INT value = INTVAL (XEXP (X, 1)); \
+ \
+ if (value == 1) \
+ return COSTS_N_INSNS (ix86_cost->add) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ \
+ if (value == 2 || value == 3) \
+ return COSTS_N_INSNS (ix86_cost->lea) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ } \
+ /* fall through */ \
+ \
+ case ROTATE: \
+ case ASHIFTRT: \
+ case LSHIFTRT: \
+ case ROTATERT: \
+ if (GET_MODE (XEXP (X, 0)) == DImode) \
+ { \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ if (INTVAL (XEXP (X, 1)) > 32) \
+ return COSTS_N_INSNS(ix86_cost->shift_const + 2); \
+ return COSTS_N_INSNS(ix86_cost->shift_const * 2); \
+ } \
+ return ((GET_CODE (XEXP (X, 1)) == AND \
+ ? COSTS_N_INSNS(ix86_cost->shift_var * 2) \
+ : COSTS_N_INSNS(ix86_cost->shift_var * 6 + 2)) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE)); \
+ } \
+ return COSTS_N_INSNS (GET_CODE (XEXP (X, 1)) == CONST_INT \
+ ? ix86_cost->shift_const \
+ : ix86_cost->shift_var) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ \
+ case MULT: \
+ if (GET_CODE (XEXP (X, 1)) == CONST_INT) \
+ { \
+ unsigned HOST_WIDE_INT value = INTVAL (XEXP (X, 1)); \
+ int nbits = 0; \
+ \
+ if (value == 2) \
+ return COSTS_N_INSNS (ix86_cost->add) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ if (value == 4 || value == 8) \
+ return COSTS_N_INSNS (ix86_cost->lea) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ \
+ while (value != 0) \
+ { \
+ nbits++; \
+ value >>= 1; \
+ } \
+ \
+ if (nbits == 1) \
+ return COSTS_N_INSNS (ix86_cost->shift_const) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ \
+ return COSTS_N_INSNS (ix86_cost->mult_init \
+ + nbits * ix86_cost->mult_bit) \
+ + rtx_cost(XEXP (X, 0), OUTER_CODE); \
+ } \
+ \
+ else /* This is arbitrary */ \
+ TOPLEVEL_COSTS_N_INSNS (ix86_cost->mult_init \
+ + 7 * ix86_cost->mult_bit); \
+ \
+ case DIV: \
+ case UDIV: \
+ case MOD: \
+ case UMOD: \
+ TOPLEVEL_COSTS_N_INSNS (ix86_cost->divide); \
+ \
+ case PLUS: \
+ if (GET_CODE (XEXP (X, 0)) == REG \
+ && GET_MODE (XEXP (X, 0)) == SImode \
+ && GET_CODE (XEXP (X, 1)) == PLUS) \
+ return COSTS_N_INSNS (ix86_cost->lea); \
+ \
+ /* fall through */ \
+ case AND: \
+ case IOR: \
+ case XOR: \
+ case MINUS: \
+ if (GET_MODE (X) == DImode) \
+ return COSTS_N_INSNS (ix86_cost->add) * 2 \
+ + (rtx_cost (XEXP (X, 0), OUTER_CODE) \
+ << (GET_MODE (XEXP (X, 0)) != DImode)) \
+ + (rtx_cost (XEXP (X, 1), OUTER_CODE) \
+ << (GET_MODE (XEXP (X, 1)) != DImode)); \
+ case NEG: \
+ case NOT: \
+ if (GET_MODE (X) == DImode) \
+ TOPLEVEL_COSTS_N_INSNS (ix86_cost->add * 2) \
+ TOPLEVEL_COSTS_N_INSNS (ix86_cost->add)
+
+
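The multiply-by-constant case above charges mult_init plus one mult_bit per bit position up to the highest set bit of the constant, with separate shortcuts for 2, 4, 8 and other powers of two. Below is a standalone restatement of just that counting rule, as an editorial sketch with the cost fields passed as plain parameters instead of being read from the ix86_cost table.

/* Cost charged by RTX_COSTS above for a multiply by a non-trivial
   constant: mult_init plus mult_bit per bit up to and including the
   highest set bit.  The power-of-two shortcuts are omitted here.  */
int
mul_const_cost (unsigned long value, int mult_init, int mult_bit)
{
  int nbits = 0;

  while (value != 0)
    {
      nbits++;
      value >>= 1;
    }

  return mult_init + nbits * mult_bit;
}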
+/* An expression giving the cost of an addressing mode that contains
+ ADDRESS. If not defined, the cost is computed from the ADDRESS
+ expression and the `CONST_COSTS' values.
+
+ For most CISC machines, the default cost is a good approximation
+ of the true cost of the addressing mode. However, on RISC
+ machines, all instructions normally have the same length and
+ execution time. Hence all addresses will have equal costs.
+
+ In cases where more than one form of an address is known, the form
+ with the lowest cost will be used. If multiple forms have the
+ same, lowest, cost, the one that is the most complex will be used.
+
+ For example, suppose an address that is equal to the sum of a
+ register and a constant is used twice in the same basic block.
+ When this macro is not defined, the address will be computed in a
+ register and memory references will be indirect through that
+ register. On machines where the cost of the addressing mode
+ containing the sum is no higher than that of a simple indirect
+ reference, this will produce an additional instruction and
+ possibly require an additional register. Proper specification of
+ this macro eliminates this overhead for such machines.
+
+ Similar use of this macro is made in strength reduction of loops.
+
+ ADDRESS need not be valid as an address. In such a case, the cost
+ is not relevant and can be any value; invalid addresses need not be
+ assigned a different cost.
+
+ On machines where an address involving more than one register is as
+ cheap as an address computation involving only one register,
+ defining `ADDRESS_COST' to reflect this can cause two registers to
+ be live over a region of code where only one would have been if
+ `ADDRESS_COST' were not defined in that manner. This effect should
+ be considered in the definition of this macro. Equivalent costs
+ should probably only be given to addresses with different numbers
+ of registers on machines with lots of registers.
+
+ This macro will normally either not be defined or be defined as a
+ constant.
For i386, it is better to use a complex address than let gcc copy
the address into a reg and make a new pseudo. But not if the address
@@ -1487,6 +2136,193 @@ while (0)
&& REG_P (XEXP (RTX, 0)))) ? 0 \
: REG_P (RTX) ? 1 \
: 2)
+
+/* A C expression for the cost of moving data from a register of
+ class CLASS1 to one of CLASS2. A value of 2 is the default; other
+ values are interpreted relative to that.
+
+ If moving between two classes of registers is more expensive than
+ that default, you should define this macro to express the relative
+ cost.
+
+ On the i386, copying between floating-point and fixed-point
+ registers is expensive. */
+
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((FLOAT_CLASS_P (CLASS1) && ! FLOAT_CLASS_P (CLASS2)) \
+ || (! FLOAT_CLASS_P (CLASS1) && FLOAT_CLASS_P (CLASS2))) ? 10 \
+ : 2)
+
+
+/* A C expression for the cost of moving data of mode M between a
+ register and memory. A value of 2 is the default; this cost is
+ relative to those in `REGISTER_MOVE_COST'.
+
+ If moving between registers and memory is more expensive than
+ between two registers, you should define this macro to express the
+ relative cost. */
+
+/* #define MEMORY_MOVE_COST(M,C,I) 2 */
+
+/* A C expression for the cost of a branch instruction. A value of 1
+ is the default; other values are interpreted relative to that. */
+
+#define BRANCH_COST i386_branch_cost
+
+/* Define this macro as a C expression which is nonzero if accessing
+ less than a word of memory (i.e. a `char' or a `short') is no
+ faster than accessing a word of memory, i.e., if such an access
+ requires more than one instruction or if there is no difference in
+ cost between byte and (aligned) word loads.
+
+ When this macro is not defined, the compiler will access a field by
+ finding the smallest containing object; when it is defined, a
+ fullword load will be used if alignment permits. Unless byte
+ accesses are faster than word accesses, using word accesses is
+ preferable since it may eliminate subsequent memory access if
+ subsequent accesses occur to other fields in the same word of the
+ structure, but to different bytes. */
+
+#define SLOW_BYTE_ACCESS 0
+
+/* Nonzero if access to memory by shorts is slow and undesirable. */
+#define SLOW_SHORT_ACCESS 0
+
+/* Define this macro if zero-extension (of a `char' or `short' to an
+ `int') can be done faster if the destination is a register that is
+ known to be zero.
+
+ If you define this macro, you must have instruction patterns that
+ recognize RTL structures like this:
+
+ (set (strict_low_part (subreg:QI (reg:SI ...) 0)) ...)
+
+ and likewise for `HImode'. */
+
+/* #define SLOW_ZERO_EXTEND */
+
+/* Define this macro to be the value 1 if unaligned accesses have a
+ cost many times greater than aligned accesses, for example if they
+ are emulated in a trap handler.
+
+ When this macro is non-zero, the compiler will act as if
+ `STRICT_ALIGNMENT' were non-zero when generating code for block
+ moves. This can cause significantly more instructions to be
+ produced. Therefore, do not set this macro non-zero if unaligned
+ accesses only add a cycle or two to the time for a memory access.
+
+ If the value of this macro is always zero, it need not be defined. */
+
+/* #define SLOW_UNALIGNED_ACCESS 0 */
+
+/* Define this macro to inhibit strength reduction of memory
+ addresses. (On some machines, such strength reduction seems to do
+ harm rather than good.) */
+
+/* #define DONT_REDUCE_ADDR */
+
+/* Define this macro if it is as good or better to call a constant
+ function address than to call an address kept in a register.
+
+ Desirable on the 386 because a CALL with a constant address is
+ faster than one with a register address. */
+
+#define NO_FUNCTION_CSE
+
+/* Define this macro if it is as good or better for a function to call
+ itself with an explicit address than to call an address kept in a
+ register. */
+
+#define NO_RECURSIVE_FUNCTION_CSE
+
+/* A C statement (sans semicolon) to update the integer variable COST
+ based on the relationship between INSN that is dependent on
+ DEP_INSN through the dependence LINK. The default is to make no
+ adjustment to COST. This can be used for example to specify to
+ the scheduler that an output- or anti-dependence does not incur
+ the same cost as a data-dependence. */
+
+#define ADJUST_COST(insn,link,dep_insn,cost) \
+ { \
+ rtx next_inst; \
+ if (GET_CODE (dep_insn) == CALL_INSN) \
+ (cost) = 0; \
+ \
+ else if (GET_CODE (dep_insn) == INSN \
+ && GET_CODE (PATTERN (dep_insn)) == SET \
+ && GET_CODE (SET_DEST (PATTERN (dep_insn))) == REG \
+ && GET_CODE (insn) == INSN \
+ && GET_CODE (PATTERN (insn)) == SET \
+ && !reg_overlap_mentioned_p (SET_DEST (PATTERN (dep_insn)), \
+ SET_SRC (PATTERN (insn)))) \
+ { \
+ (cost) = 0; \
+ } \
+ \
+ else if (GET_CODE (insn) == JUMP_INSN) \
+ { \
+ (cost) = 0; \
+ } \
+ \
+ if (TARGET_PENTIUM) \
+ { \
+ if (cost !=0 && is_fp_insn (insn) && is_fp_insn (dep_insn) \
+ && !is_fp_dest (dep_insn)) \
+ { \
+ (cost) = 0; \
+ } \
+ \
+ if (agi_dependent (insn, dep_insn)) \
+ { \
+ (cost) = 3; \
+ } \
+ else if (GET_CODE (insn) == INSN \
+ && GET_CODE (PATTERN (insn)) == SET \
+ && SET_DEST (PATTERN (insn)) == cc0_rtx \
+ && (next_inst = next_nonnote_insn (insn)) \
+ && GET_CODE (next_inst) == JUMP_INSN) \
+ { /* compare probably paired with jump */ \
+ (cost) = 0; \
+ } \
+ } \
+ else \
+ if (!is_fp_dest (dep_insn)) \
+ { \
+ if(!agi_dependent (insn, dep_insn)) \
+ (cost) = 0; \
+ else if (TARGET_486) \
+ (cost) = 2; \
+ } \
+ else \
+ if (is_fp_store (insn) && is_fp_insn (dep_insn) \
+ && NEXT_INSN (insn) && NEXT_INSN (NEXT_INSN (insn)) \
+ && NEXT_INSN (NEXT_INSN (NEXT_INSN (insn))) \
+ && (GET_CODE (NEXT_INSN (insn)) == INSN) \
+ && (GET_CODE (NEXT_INSN (NEXT_INSN (insn))) == JUMP_INSN) \
+ && (GET_CODE (NEXT_INSN (NEXT_INSN (NEXT_INSN (insn)))) == NOTE) \
+ && (NOTE_LINE_NUMBER (NEXT_INSN (NEXT_INSN (NEXT_INSN (insn)))) \
+ == NOTE_INSN_LOOP_END)) \
+ { \
+ (cost) = 3; \
+ } \
+ }
+
+
+#define ADJUST_BLOCKAGE(last_insn,insn,blockage) \
+{ \
+ if (is_fp_store (last_insn) && is_fp_insn (insn) \
+ && NEXT_INSN (last_insn) && NEXT_INSN (NEXT_INSN (last_insn)) \
+ && NEXT_INSN (NEXT_INSN (NEXT_INSN (last_insn))) \
+ && (GET_CODE (NEXT_INSN (last_insn)) == INSN) \
+ && (GET_CODE (NEXT_INSN (NEXT_INSN (last_insn))) == JUMP_INSN) \
+ && (GET_CODE (NEXT_INSN (NEXT_INSN (NEXT_INSN (last_insn)))) == NOTE) \
+ && (NOTE_LINE_NUMBER (NEXT_INSN (NEXT_INSN (NEXT_INSN (last_insn)))) \
+ == NOTE_INSN_LOOP_END)) \
+ { \
+ (blockage) = 3; \
+ } \
+}
+
/* Add any extra modes needed to represent the condition code.
@@ -1519,6 +2355,10 @@ extern struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
/* Here we define machine-dependent flags and fields in cc_status
(see `conditions.h'). */
+/* Set if the cc value was actually from the 80387 and
+ we are testing eax directly (i.e. no sahf) */
+#define CC_TEST_AX 020000
+
/* Set if the cc value is actually in the 80387, so a floating point
conditional branch must be output. */
#define CC_IN_80387 04000
@@ -1527,6 +2367,10 @@ extern struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
the state of equality is indicated by zero in the carry bit. */
#define CC_Z_IN_NOT_C 010000
+/* Set if the CC value was actually from the 80387 and loaded directly
+ into the eflags instead of via eax/sahf. */
+#define CC_FCOMI 040000
+
/* Store in cc_status the expressions
that the condition codes will describe
after execution of an instruction whose pattern is EXP.
@@ -1572,10 +2416,10 @@ extern struct rtx_def *(*i386_compare_gen)(), *(*i386_compare_gen_eq)();
/* Table of additional register names to use in user input. */
#define ADDITIONAL_REGISTER_NAMES \
-{ "eax", 0, "edx", 1, "ecx", 2, "ebx", 3, \
- "esi", 4, "edi", 5, "ebp", 6, "esp", 7, \
- "al", 0, "dl", 1, "cl", 2, "bl", 3, \
- "ah", 0, "dh", 1, "ch", 2, "bh", 3 }
+{ { "eax", 0 }, { "edx", 1 }, { "ecx", 2 }, { "ebx", 3 }, \
+ { "esi", 4 }, { "edi", 5 }, { "ebp", 6 }, { "esp", 7 }, \
+ { "al", 0 }, { "dl", 1 }, { "cl", 2 }, { "bl", 3 }, \
+ { "ah", 0 }, { "dh", 1 }, { "ch", 2 }, { "bh", 3 } }
/* Note we are omitting these since currently I don't know how
to get gcc to use these, since they want the same but different
@@ -1609,6 +2453,22 @@ number as al, and ax.
(n) == 7 ? 5 : \
(n) + 4)
+/* Before the prologue, RA is at 0(%esp). */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_MEM (VOIDmode, gen_rtx_REG (VOIDmode, STACK_POINTER_REGNUM))
+
+/* After the prologue, RA is at -4(AP) in the current frame. */
+#define RETURN_ADDR_RTX(COUNT, FRAME) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, arg_pointer_rtx, GEN_INT(-4)))\
+ : gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, (FRAME), GEN_INT(4))))
+
+/* PC is dbx register 8; let's use that column for RA. */
+#define DWARF_FRAME_RETURN_COLUMN 8
+
+/* Before the prologue, the top of the frame is at 4(%esp). */
+#define INCOMING_FRAME_SP_OFFSET 4
+
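As an illustration of the count-0 case of RETURN_ADDR_RTX above (a sketch assuming the frame layout just described):

/* __builtin_return_address (0) resolves, via RETURN_ADDR_RTX, to the
   word 4 bytes below the argument pointer of the current frame.  */
void *
my_return_address (void)
{
  return __builtin_return_address (0);
}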
/* This is how to output the definition of a user-level label named NAME,
such as the label on a static function or variable NAME. */
@@ -1620,10 +2480,7 @@ number as al, and ax.
#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
do { long l[2]; \
REAL_VALUE_TO_TARGET_DOUBLE (VALUE, l); \
- if (sizeof (int) == sizeof (long)) \
- fprintf (FILE, "%s 0x%x,0x%x\n", ASM_LONG, l[0], l[1]); \
- else \
- fprintf (FILE, "%s 0x%lx,0x%lx\n", ASM_LONG, l[0], l[1]); \
+ fprintf (FILE, "%s 0x%lx,0x%lx\n", ASM_LONG, l[0], l[1]); \
} while (0)
/* This is how to output a `long double' extended real constant. */
@@ -1632,10 +2489,7 @@ do { long l[2]; \
#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) \
do { long l[3]; \
REAL_VALUE_TO_TARGET_LONG_DOUBLE (VALUE, l); \
- if (sizeof (int) == sizeof (long)) \
- fprintf (FILE, "%s 0x%x,0x%x,0x%x\n", ASM_LONG, l[0], l[1], l[2]); \
- else \
- fprintf (FILE, "%s 0x%lx,0x%lx,0x%lx\n", ASM_LONG, l[0], l[1], l[2]); \
+ fprintf (FILE, "%s 0x%lx,0x%lx,0x%lx\n", ASM_LONG, l[0], l[1], l[2]); \
} while (0)
/* This is how to output an assembler line defining a `float' constant. */
@@ -1643,10 +2497,7 @@ do { long l[3]; \
#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
do { long l; \
REAL_VALUE_TO_TARGET_SINGLE (VALUE, l); \
- if (sizeof (int) == sizeof (long)) \
- fprintf ((FILE), "%s 0x%x\n", ASM_LONG, l); \
- else \
- fprintf ((FILE), "%s 0x%lx\n", ASM_LONG, l); \
+ fprintf ((FILE), "%s 0x%lx\n", ASM_LONG, l); \
} while (0)
/* Store in OUTPUT a string (made with alloca) containing
@@ -1698,13 +2549,13 @@ do { long l; \
It need not be very fast code. */
#define ASM_OUTPUT_REG_PUSH(FILE,REGNO) \
- fprintf (FILE, "\tpushl e%s\n", reg_names[REGNO])
+ fprintf (FILE, "\tpushl %%e%s\n", reg_names[REGNO])
/* This is how to output an insn to pop a register from the stack.
It need not be very fast code. */
#define ASM_OUTPUT_REG_POP(FILE,REGNO) \
- fprintf (FILE, "\tpopl e%s\n", reg_names[REGNO])
+ fprintf (FILE, "\tpopl %%e%s\n", reg_names[REGNO])
/* This is how to output an element of a case-vector that is absolute.
*/
@@ -1717,7 +2568,7 @@ do { long l; \
forward reference the differences.
*/
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.word %s%d-%s%d\n",LPREFIX, VALUE,LPREFIX, REL)
/* Define the parentheses used to group arithmetic operations
@@ -1746,9 +2597,13 @@ do { long l; \
R -- print the prefix for register names.
z -- print the opcode suffix for the size of the current operand.
* -- print a star (in certain assembler syntax)
- w -- print the operand as if it's a "word" (HImode) even if it isn't.
- b -- print the operand as if it's a byte (QImode) even if it isn't.
- c -- don't print special prefixes before constant operands. */
+ P -- if PIC, print an @PLT suffix.
+ X -- don't print any sort of PIC '@' suffix for a symbol.
+ J -- print jump insn for arithmetic_comparison_operator.
+ s -- ??? something to do with double shifts. not actually used, afaik.
+ C -- print a conditional move suffix corresponding to the op code.
+ c -- likewise, but reverse the condition.
+ F,f -- likewise, but for floating-point. */
#define PRINT_OPERAND_PUNCT_VALID_P(CODE) \
((CODE) == '*')
@@ -1853,13 +2708,30 @@ extern char *qi_high_reg_name[];
we can use for operand syntax in the extended asm */
#define ASM_OPERAND_LETTER '#'
-
#define RET return ""
-#define AT_SP(mode) (gen_rtx (MEM, (mode), stack_pointer_rtx))
+#define AT_SP(mode) (gen_rtx_MEM ((mode), stack_pointer_rtx))
+
+/* Helper macros to expand a binary/unary operator if needed */
+#define IX86_EXPAND_BINARY_OPERATOR(OP, MODE, OPERANDS) \
+do { \
+ if (!ix86_expand_binary_operator (OP, MODE, OPERANDS)) \
+ FAIL; \
+} while (0)
+
+#define IX86_EXPAND_UNARY_OPERATOR(OP, MODE, OPERANDS) \
+do { \
+ if (!ix86_expand_unary_operator (OP, MODE, OPERANDS)) \
+ FAIL; \
+} while (0)
+
/* Functions in i386.c */
extern void override_options ();
extern void order_regs_for_local_alloc ();
+extern char *output_strlen_unroll ();
+extern struct rtx_def *i386_sext16_if_const ();
+extern int i386_aligned_p ();
+extern int i386_cc_probably_useless_p ();
extern int i386_valid_decl_attribute_p ();
extern int i386_valid_type_attribute_p ();
extern int i386_return_pops_args ();
@@ -1868,6 +2740,7 @@ extern void init_cumulative_args ();
extern void function_arg_advance ();
extern struct rtx_def *function_arg ();
extern int function_arg_partial_nregs ();
+extern char *output_strlen_unroll ();
extern void output_op_from_reg ();
extern void output_to_reg ();
extern char *singlemove_string ();
@@ -1880,6 +2753,10 @@ extern int symbolic_operand ();
extern int call_insn_operand ();
extern int expander_call_insn_operand ();
extern int symbolic_reference_mentioned_p ();
+extern int ix86_expand_binary_operator ();
+extern int ix86_binary_operator_ok ();
+extern int ix86_expand_unary_operator ();
+extern int ix86_unary_operator_ok ();
extern void emit_pic_move ();
extern void function_prologue ();
extern int simple_386_epilogue ();
@@ -1902,17 +2779,40 @@ extern void save_386_machine_status ();
extern void restore_386_machine_status ();
extern void clear_386_stack_locals ();
extern struct rtx_def *assign_386_stack_local ();
+extern int is_mul ();
+extern int is_div ();
+extern int last_to_set_cc ();
+extern int doesnt_set_condition_code ();
+extern int sets_condition_code ();
+extern int str_immediate_operand ();
+extern int is_fp_insn ();
+extern int is_fp_dest ();
+extern int is_fp_store ();
+extern int agi_dependent ();
+extern int reg_mentioned_in_mem ();
+extern char *output_int_conditional_move ();
+extern char *output_fp_conditional_move ();
+extern int ix86_can_use_return_insn_p ();
+
+#ifdef NOTYET
+extern struct rtx_def *copy_all_rtx ();
+extern void rewrite_address ();
+#endif
/* Variables in i386.c */
+extern char *ix86_cpu_string; /* for -mcpu=<xxx> */
+extern char *ix86_arch_string; /* for -march=<xxx> */
extern char *i386_reg_alloc_order; /* register allocation order */
extern char *i386_regparm_string; /* # registers to use to pass args */
extern char *i386_align_loops_string; /* power of two alignment for loops */
extern char *i386_align_jumps_string; /* power of two alignment for non-loop jumps */
extern char *i386_align_funcs_string; /* power of two alignment for functions */
+extern char *i386_branch_cost_string; /* values 1-5: see jump.c */
extern int i386_regparm; /* i386_regparm_string as a number */
extern int i386_align_loops; /* power of two alignment for loops */
extern int i386_align_jumps; /* power of two alignment for non-loop jumps */
extern int i386_align_funcs; /* power of two alignment for functions */
+extern int i386_branch_cost; /* values 1-5: see jump.c */
extern char *hi_reg_name[]; /* names for 16 bit regs */
extern char *qi_reg_name[]; /* names for 8 bit regs (low) */
extern char *qi_high_reg_name[]; /* names for 8 bit regs (high) */
@@ -1921,11 +2821,12 @@ extern struct rtx_def *i386_compare_op0; /* operand 0 for comparisons */
extern struct rtx_def *i386_compare_op1; /* operand 1 for comparisons */
/* External variables used */
-extern int optimize; /* optimization level */
-extern int obey_regdecls; /* TRUE if stupid register allocation */
+extern int optimize; /* optimization level */
+extern int obey_regdecls; /* TRUE if stupid register allocation */
/* External functions used */
extern struct rtx_def *force_operand ();
+
/*
Local variables:
diff --git a/contrib/gcc/config/i386/i386.md b/contrib/gcc/config/i386/i386.md
index ff01196..f85992f 100644
--- a/contrib/gcc/config/i386/i386.md
+++ b/contrib/gcc/config/i386/i386.md
@@ -1,5 +1,5 @@
-;; GCC machine description for Intel X86.
-;; Copyright (C) 1988, 1994, 1995 Free Software Foundation, Inc.
+; GCC machine description for Intel X86.
+;; Copyright (C) 1988, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
;; Mostly by William Schelter.
;; This file is part of GNU CC.
@@ -17,8 +17,7 @@
;; You should have received a copy of the GNU General Public License
;; along with GNU CC; see the file COPYING. If not, write to
;; the Free Software Foundation, 59 Temple Place - Suite 330,
-;; Boston, MA 02111-1307, USA.
-
+;; Boston, MA 02111-1307, USA. */
;; The original PO technology requires these to be ordered by speed,
;; so that assigner will pick the fastest.
@@ -37,8 +36,8 @@
;; 'L' Print the opcode suffix for a 32-bit integer opcode.
;; 'W' Print the opcode suffix for a 16-bit integer opcode.
;; 'B' Print the opcode suffix for an 8-bit integer opcode.
-;; 'S' Print the opcode suffix for a 32-bit float opcode.
;; 'Q' Print the opcode suffix for a 64-bit float opcode.
+;; 'S' Print the opcode suffix for a 32-bit float opcode.
;; 'T' Print the opcode suffix for an 80-bit extended real XFmode float opcode.
;; 'J' Print the appropriate jump operand.
@@ -59,6 +58,72 @@
;; operand 0 is the argument for `sin'.
;; 2 This is a `cos' operation. The mode of the UNSPEC is MODE_FLOAT.
;; operand 0 is the argument for `cos'.
+;; 3 This is part of a `stack probe' operation. The mode of the UNSPEC is
+;; always SImode. operand 0 is the size of the stack allocation.
+;; 4 This is the source of a fake SET of the frame pointer which is used to
+;; prevent insns referencing it being scheduled across the initial
+;; decrement of the stack pointer.
+;; 5 This is a `bsf' operation.
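+;;
+;; (For illustration, an UNSPEC carries one of the indices above as its last
+;; field; a stack-probe body, say, looks roughly like
+;;     (unspec:SI [(match_operand:SI 0 "general_operand" "")] 3)
+;; where the trailing 3 selects entry 3 in this table.)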
+
+;; This shadows the processor_type enumeration, so changes must be made
+;; to i386.h at the same time.
+
+(define_attr "type" "integer,idiv,imul,fld,fpop,fpdiv,fpmul"
+ (const_string "integer"))
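+
+;; Individual insns override the default with set_attr; for instance the
+;; SFmode and DFmode move patterns later in this file are tagged
+;; [(set_attr "type" "fld")] so the scheduler charges them to the "fld"
+;; function unit declared below.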
+
+;; Functional units
+
+; (define_function_unit NAME MULTIPLICITY SIMULTANEITY
+; TEST READY-DELAY ISSUE-DELAY [CONFLICT-LIST])
+
+; pentiumpro has a reservation station with 5 ports
+; port 0 has integer, float add, integer divide, float divide, float
+; multiply, and shifter units.
+; port 1 has integer, and jump units.
+; port 2 has the load address generation unit
+; ports 3 and 4 have the store address generation units
+
+; pentium has two integer pipelines, the main u pipe and the secondary v pipe.
+; and a float pipeline
+
+;; Floating point
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "fpop") (eq_attr "cpu" "i386,i486"))
+ 5 5)
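+
+; (A rough reading of the declaration above: an "fpop" insn when tuning for
+; the i386 or i486 uses the single "fp" unit, its result is ready 5 cycles
+; after issue (READY-DELAY), and no other insn may issue to the unit for
+; 5 cycles (ISSUE-DELAY).)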
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "fpop") (eq_attr "cpu" "pentium,pentiumpro"))
+ 3 0)
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "fpmul") (eq_attr "cpu" "pentium"))
+ 7 0)
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "fpmul") (eq_attr "cpu" "pentiumpro"))
+ 5 0)
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "idiv") (eq_attr "cpu" "pentiumpro"))
+ 10 10)
+
+(define_function_unit "fp" 1 0
+ (and (eq_attr "type" "imul") (eq_attr "cpu" "pentiumpro"))
+ 6 0)
+
+(define_function_unit "fp" 1 0
+ (eq_attr "type" "fpdiv")
+ 10 10)
+
+(define_function_unit "fp" 1 0
+ (eq_attr "type" "fld")
+ 1 0)
+
+(define_function_unit "integer" 1 0
+ (and (eq_attr "type" "integer") (eq_attr "cpu" "!i386"))
+ 2 0)
+
;; "movl MEM,REG / testl REG,REG" is faster on a 486 than "cmpl $0,MEM".
;; But restricting MEM here would mean that gcc could not remove a redundant
@@ -72,6 +137,12 @@
;; actually generating RTL. The bCOND or sCOND (emitted immediately
;; after the tstM or cmp) will actually emit the tstM or cmpM.
+;; Processor type -- this attribute must exactly match the processor_type
+;; enumeration in i386.h.
+
+(define_attr "cpu" "i386,i486,pentium,pentiumpro"
+ (const (symbol_ref "ix86_cpu")))
+
(define_insn "tstsi_1"
[(set (cc0)
(match_operand:SI 0 "nonimmediate_operand" "rm"))]
@@ -93,6 +164,7 @@
{
i386_compare_gen = gen_tstsi_1;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -117,6 +189,7 @@
{
i386_compare_gen = gen_tsthi_1;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -141,6 +214,7 @@
{
i386_compare_gen = gen_tstqi_1;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -174,6 +248,7 @@
{
i386_compare_gen = gen_tstsf_cc;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -207,6 +282,7 @@
{
i386_compare_gen = gen_tstdf_cc;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -228,7 +304,7 @@
return output_fp_cc0_set (insn);
}")
-;; Don't generate tstdf if generating IEEE code, since the `ftst' opcode
+;; Don't generate tstxf if generating IEEE code, since the `ftst' opcode
;; isn't IEEE compliant.
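;; (Roughly: `ftst' compares %st(0) with +0.0 the way `fcom' does, so a NaN
;; operand raises the invalid-operation exception instead of quietly
;; comparing as unordered.)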
(define_expand "tstxf"
@@ -240,6 +316,7 @@
{
i386_compare_gen = gen_tstxf_cc;
i386_compare_op0 = operands[0];
+ i386_compare_op1 = const0_rtx;
DONE;
}")
@@ -251,15 +328,7 @@
(compare (match_operand:SI 0 "nonimmediate_operand" "mr,r")
(match_operand:SI 1 "general_operand" "ri,mr")))]
"GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
- "*
-{
- if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
- {
- cc_status.flags |= CC_REVERSED;
- return AS2 (cmp%L0,%0,%1);
- }
- return AS2 (cmp%L0,%1,%0);
-}")
+ "* return AS2 (cmp%L0,%1,%0);")
(define_expand "cmpsi"
[(set (cc0)
@@ -282,15 +351,7 @@
(compare (match_operand:HI 0 "nonimmediate_operand" "mr,r")
(match_operand:HI 1 "general_operand" "ri,mr")))]
"GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
- "*
-{
- if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
- {
- cc_status.flags |= CC_REVERSED;
- return AS2 (cmp%W0,%0,%1);
- }
- return AS2 (cmp%W0,%1,%0);
-}")
+ "* return AS2 (cmp%W0,%1,%0);")
(define_expand "cmphi"
[(set (cc0)
@@ -313,15 +374,7 @@
(compare (match_operand:QI 0 "nonimmediate_operand" "q,mq")
(match_operand:QI 1 "general_operand" "qm,nq")))]
"GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM"
- "*
-{
- if (CONSTANT_P (operands[0]) || GET_CODE (operands[1]) == MEM)
- {
- cc_status.flags |= CC_REVERSED;
- return AS2 (cmp%B0,%0,%1);
- }
- return AS2 (cmp%B0,%1,%0);
-}")
+ "* return AS2 (cmp%B0,%1,%0);")
(define_expand "cmpqi"
[(set (cc0)
@@ -346,11 +399,10 @@
(define_insn ""
[(set (cc0)
(match_operator 2 "VOIDmode_compare_op"
- [(match_operand:XF 0 "nonimmediate_operand" "f")
- (match_operand:XF 1 "nonimmediate_operand" "f")]))
+ [(match_operand:XF 0 "register_operand" "f")
+ (match_operand:XF 1 "register_operand" "f")]))
(clobber (match_scratch:HI 3 "=a"))]
- "TARGET_80387
- && (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)"
+ "TARGET_80387"
"* return output_float_compare (insn, operands);")
(define_insn ""
@@ -386,6 +438,16 @@
(define_insn ""
[(set (cc0)
(match_operator 2 "VOIDmode_compare_op"
+ [(float_extend:XF
+ (match_operand:DF 0 "nonimmediate_operand" "fm"))
+ (match_operand:XF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
[(match_operand:XF 0 "register_operand" "f")
(float_extend:XF
(match_operand:SF 1 "nonimmediate_operand" "fm"))]))
@@ -395,6 +457,16 @@
(define_insn ""
[(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float_extend:XF
+ (match_operand:SF 0 "nonimmediate_operand" "fm"))
+ (match_operand:XF 1 "register_operand" "f")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
(compare:CCFPEQ (match_operand:XF 0 "register_operand" "f")
(match_operand:XF 1 "register_operand" "f")))
(clobber (match_scratch:HI 2 "=a"))]
@@ -453,6 +525,16 @@
(define_insn ""
[(set (cc0)
+ (match_operator 2 "VOIDmode_compare_op"
+ [(float_extend:DF
+ (match_operand:SF 0 "register_operand" "f"))
+ (match_operand:DF 1 "nonimmediate_operand" "fm")]))
+ (clobber (match_scratch:HI 3 "=a"))]
+ "TARGET_80387"
+ "* return output_float_compare (insn, operands);")
+
+(define_insn ""
+ [(set (cc0)
(compare:CCFPEQ (match_operand:DF 0 "register_operand" "f")
(match_operand:DF 1 "register_operand" "f")))
(clobber (match_scratch:HI 2 "=a"))]
@@ -520,7 +602,7 @@
(define_expand "cmpxf"
[(set (cc0)
(compare (match_operand:XF 0 "register_operand" "")
- (match_operand:XF 1 "nonimmediate_operand" "")))]
+ (match_operand:XF 1 "register_operand" "")))]
"TARGET_80387"
"
{
@@ -534,28 +616,30 @@
(define_expand "cmpdf"
[(set (cc0)
(compare (match_operand:DF 0 "register_operand" "")
- (match_operand:DF 1 "nonimmediate_operand" "")))]
+ (match_operand:DF 1 "general_operand" "")))]
"TARGET_80387"
"
{
i386_compare_gen = gen_cmpdf_cc;
i386_compare_gen_eq = gen_cmpdf_ccfpeq;
i386_compare_op0 = operands[0];
- i386_compare_op1 = operands[1];
+ i386_compare_op1 = (immediate_operand (operands[1], DFmode))
+ ? copy_to_mode_reg (DFmode, operands[1]) : operands[1];
DONE;
}")
(define_expand "cmpsf"
[(set (cc0)
(compare (match_operand:SF 0 "register_operand" "")
- (match_operand:SF 1 "nonimmediate_operand" "")))]
+ (match_operand:SF 1 "general_operand" "")))]
"TARGET_80387"
"
{
i386_compare_gen = gen_cmpsf_cc;
i386_compare_gen_eq = gen_cmpsf_ccfpeq;
i386_compare_op0 = operands[0];
- i386_compare_op1 = operands[1];
+ i386_compare_op1 = (immediate_operand (operands[1], SFmode))
+ ? copy_to_mode_reg (SFmode, operands[1]) : operands[1];
DONE;
}")
@@ -573,11 +657,7 @@
(match_operand:XF 1 "register_operand" "")))
(clobber (match_scratch:HI 2 ""))])]
"TARGET_80387"
- "
-{
- if (! register_operand (operands[1], XFmode))
- operands[1] = copy_to_mode_reg (XFmode, operands[1]);
-}")
+ "")
(define_expand "cmpdf_cc"
[(parallel [(set (cc0)
@@ -624,7 +704,7 @@
(define_insn ""
[(set (cc0)
(and:SI (match_operand:SI 0 "general_operand" "%ro")
- (match_operand:SI 1 "general_operand" "ri")))]
+ (match_operand:SI 1 "nonmemory_operand" "ri")))]
""
"*
{
@@ -682,7 +762,7 @@
(define_insn ""
[(set (cc0)
(and:HI (match_operand:HI 0 "general_operand" "%ro")
- (match_operand:HI 1 "general_operand" "ri")))]
+ (match_operand:HI 1 "nonmemory_operand" "ri")))]
""
"*
{
@@ -715,6 +795,12 @@
}
}
+ /* use 32-bit test instruction if there are no sign issues */
+ if (GET_CODE (operands[1]) == CONST_INT
+ && !(INTVAL (operands[1]) & ~0x7fff)
+ && i386_aligned_p (operands[0]))
+ return AS2 (test%L0,%1,%k0);
+
if (CONSTANT_P (operands[1]) || GET_CODE (operands[0]) == MEM)
return AS2 (test%W0,%1,%0);
@@ -723,8 +809,8 @@
(define_insn ""
[(set (cc0)
- (and:QI (match_operand:QI 0 "general_operand" "%qm")
- (match_operand:QI 1 "general_operand" "qi")))]
+ (and:QI (match_operand:QI 0 "nonimmediate_operand" "%qm")
+ (match_operand:QI 1 "nonmemory_operand" "qi")))]
""
"*
{
@@ -741,24 +827,23 @@
(define_insn ""
[(set (match_operand:SI 0 "push_operand" "=<")
- (match_operand:SI 1 "general_operand" "g"))]
- "TARGET_386"
- "push%L0 %1")
-
-;; On a 486, it is faster to move MEM to a REG and then push, rather than
-;; push MEM directly.
+ (match_operand:SI 1 "nonmemory_operand" "rn"))]
+ "flag_pic"
+ "* return AS1 (push%L0,%1);")
(define_insn ""
[(set (match_operand:SI 0 "push_operand" "=<")
(match_operand:SI 1 "nonmemory_operand" "ri"))]
- "!TARGET_386 && TARGET_MOVE"
- "push%L0 %1")
+ "!flag_pic"
+ "* return AS1 (push%L0,%1);")
+
+;; On a 386, it is faster to push MEM directly.
(define_insn ""
[(set (match_operand:SI 0 "push_operand" "=<")
- (match_operand:SI 1 "general_operand" "ri"))]
- "!TARGET_386 && !TARGET_MOVE"
- "push%L0 %1")
+ (match_operand:SI 1 "memory_operand" "m"))]
+ "TARGET_PUSH_MEMORY"
+ "* return AS1 (push%L0,%1);")
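+
+;; (e.g. "pushl 8(%ebp)" directly, versus "movl 8(%ebp),%eax" followed by
+;; "pushl %eax": the 386 favors the direct push while the 486 favors the
+;; mov/push pair, so the memory alternative above is only enabled when
+;; TARGET_PUSH_MEMORY holds.)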
;; General case of fullword move.
@@ -790,8 +875,10 @@
(define_insn ""
[(set (match_operand:SI 0 "general_operand" "=g,r")
- (match_operand:SI 1 "general_operand" "ri,m"))]
- "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
+ (match_operand:SI 1 "general_operand" "rn,im"))]
+ "((!TARGET_MOVE || GET_CODE (operands[0]) != MEM)
+ || (GET_CODE (operands[1]) != MEM))
+ && flag_pic"
"*
{
rtx link;
@@ -810,29 +897,50 @@
/* Fastest way to change a 0 to a 1. */
return AS1 (inc%L0,%0);
- if (flag_pic && SYMBOLIC_CONST (operands[1]))
+ if (SYMBOLIC_CONST (operands[1]))
return AS2 (lea%L0,%a1,%0);
return AS2 (mov%L0,%1,%0);
}")
(define_insn ""
- [(set (match_operand:HI 0 "push_operand" "=<")
- (match_operand:HI 1 "general_operand" "g"))]
- "TARGET_386"
- "push%W0 %1")
+ [(set (match_operand:SI 0 "general_operand" "=g,r")
+ (match_operand:SI 1 "general_operand" "ri,m"))]
+ "((!TARGET_MOVE || GET_CODE (operands[0]) != MEM)
+ || (GET_CODE (operands[1]) != MEM))
+ && !flag_pic"
+ "*
+{
+ rtx link;
+ if (operands[1] == const0_rtx && REG_P (operands[0]))
+ return AS2 (xor%L0,%0,%0);
+
+ if (operands[1] == const1_rtx
+ && (link = find_reg_note (insn, REG_WAS_0, 0))
+ /* Make sure the insn that stored the 0 is still present. */
+ && ! INSN_DELETED_P (XEXP (link, 0))
+ && GET_CODE (XEXP (link, 0)) != NOTE
+ /* Make sure cross jumping didn't happen here. */
+ && no_labels_between_p (XEXP (link, 0), insn)
+ /* Make sure the reg hasn't been clobbered. */
+ && ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
+ /* Fastest way to change a 0 to a 1. */
+ return AS1 (inc%L0,%0);
+
+ return AS2 (mov%L0,%1,%0);
+}")
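+
+;; (Size sketch for the REG_WAS_0 trick above: with a still-valid
+;; "movl $0,%eax" earlier, "incl %eax" is a 1-byte way to get 1, versus
+;; 5 bytes for "movl $1,%eax"; likewise "xorl %eax,%eax" is 2 bytes versus
+;; 5 for "movl $0,%eax".)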
(define_insn ""
[(set (match_operand:HI 0 "push_operand" "=<")
(match_operand:HI 1 "nonmemory_operand" "ri"))]
- "!TARGET_386 && TARGET_MOVE"
- "push%W0 %1")
+ ""
+ "* return AS1 (push%W0,%1);")
(define_insn ""
[(set (match_operand:HI 0 "push_operand" "=<")
- (match_operand:HI 1 "general_operand" "ri"))]
- "!TARGET_386 && !TARGET_MOVE"
- "push%W0 %1")
+ (match_operand:HI 1 "memory_operand" "m"))]
+ "TARGET_PUSH_MEMORY"
+ "* return AS1 (push%W0,%1);")
;; On i486, an incl and movl are both faster than incw and movw.
@@ -876,10 +984,21 @@
if (REG_P (operands[0]))
{
- if (REG_P (operands[1]))
- return AS2 (mov%L0,%k1,%k0);
- else if (CONSTANT_P (operands[1]))
- return AS2 (mov%L0,%1,%k0);
+ if (i386_aligned_p (operands[1]))
+ {
+ operands[1] = i386_sext16_if_const (operands[1]);
+ return AS2 (mov%L0,%k1,%k0);
+ }
+ if (TARGET_PENTIUMPRO)
+ {
+ /* movzwl is faster than movw on the Pentium Pro,
+ * although not as fast as an aligned movl. */
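+	  /* (A plain movw leaves the upper 16 bits of the destination
+	     stale, and the Pentium Pro then takes a partial-register
+	     stall when the full 32-bit register is read later; movzwl
+	     writes all 32 bits and avoids that.)  */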
+#ifdef INTEL_SYNTAX
+ return AS2 (movzx,%1,%k0);
+#else
+ return AS2 (movz%W0%L0,%1,%k0);
+#endif
+ }
}
return AS2 (mov%W0,%1,%0);
@@ -932,27 +1051,17 @@
;; the amount pushed up to a halfword.
(define_insn ""
[(set (match_operand:QI 0 "push_operand" "=<")
- (match_operand:QI 1 "immediate_operand" "n"))]
+ (match_operand:QI 1 "const_int_operand" "n"))]
""
- "* return AS1 (push%W0,%1);")
-
-(define_insn ""
- [(set (match_operand:QI 0 "push_operand" "=<")
- (match_operand:QI 1 "nonimmediate_operand" "q"))]
- "!TARGET_MOVE"
- "*
-{
- operands[1] = gen_rtx (REG, HImode, REGNO (operands[1]));
- return AS1 (push%W0,%1);
-}")
+ "* return AS1(push%W0,%1);")
(define_insn ""
[(set (match_operand:QI 0 "push_operand" "=<")
(match_operand:QI 1 "register_operand" "q"))]
- "TARGET_MOVE"
+ ""
"*
{
- operands[1] = gen_rtx (REG, HImode, REGNO (operands[1]));
+ operands[1] = gen_rtx_REG (HImode, REGNO (operands[1]));
return AS1 (push%W0,%1);
}")
@@ -978,14 +1087,14 @@
}")
(define_insn ""
- [(set (match_operand:QI 0 "general_operand" "=q,*r,qm")
- (match_operand:QI 1 "general_operand" "*g,q,qn"))]
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=q,*r,qm")
+ (match_operand:QI 1 "general_operand" "*g,*rn,qn"))]
"(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
"*
{
rtx link;
if (operands[1] == const0_rtx && REG_P (operands[0]))
- return AS2 (xor%B0,%0,%0);
+ return AS2 (xor%L0,%k0,%k0);
if (operands[1] == const1_rtx
&& (link = find_reg_note (insn, REG_WAS_0, 0))
@@ -996,8 +1105,14 @@
&& no_labels_between_p (XEXP (link, 0), insn)
/* Make sure the reg hasn't been clobbered. */
&& ! reg_set_between_p (operands[0], XEXP (link, 0), insn))
- /* Fastest way to change a 0 to a 1. */
- return AS1 (inc%B0,%0);
+ {
+ /* Fastest way to change a 0 to a 1.
+ If inc%B0 isn't allowed, use inc%L0. */
+ if (NON_QI_REG_P (operands[0]))
+ return AS1 (inc%L0,%k0);
+ else
+ return AS1 (inc%B0,%0);
+ }
/* If mov%B0 isn't allowed for one of these regs, use mov%L0. */
if (NON_QI_REG_P (operands[0]) || NON_QI_REG_P (operands[1]))
@@ -1032,7 +1147,7 @@
}")
(define_insn ""
- [(set (strict_low_part (match_operand:QI 0 "general_operand" "+qm,q"))
+ [(set (strict_low_part (match_operand:QI 0 "nonimmediate_operand" "+qm,q"))
(match_operand:QI 1 "general_operand" "*qn,m"))]
"(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
"*
@@ -1042,6 +1157,7 @@
return AS2 (xor%B0,%0,%0);
if (operands[1] == const1_rtx
+ && ! NON_QI_REG_P (operands[0])
&& (link = find_reg_note (insn, REG_WAS_0, 0))
/* Make sure the insn that stored the 0 is still present. */
&& ! INSN_DELETED_P (XEXP (link, 0))
@@ -1063,43 +1179,10 @@
return AS2 (mov%B0,%1,%0);
}")
-(define_expand "movsf"
- [(set (match_operand:SF 0 "general_operand" "")
- (match_operand:SF 1 "general_operand" ""))]
- ""
- "
-{
- /* Special case memory->memory moves and pushes */
- if (TARGET_MOVE
- && (reload_in_progress | reload_completed) == 0
- && GET_CODE (operands[0]) == MEM
- && (GET_CODE (operands[1]) == MEM || push_operand (operands[0], SFmode)))
- {
- rtx (*genfunc) PROTO((rtx, rtx)) = (push_operand (operands[0], SFmode))
- ? gen_movsf_push
- : gen_movsf_mem;
-
- emit_insn ((*genfunc) (operands[0], operands[1]));
- DONE;
- }
-
- /* If we are loading a floating point constant that isn't 0 or 1 into a register,
- indicate we need the pic register loaded. This could be optimized into stores
- of constants if the target eventually moves to memory, but better safe than
- sorry. */
- if (flag_pic
- && GET_CODE (operands[0]) != MEM
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && !standard_80387_constant_p (operands[1]))
- {
- current_function_uses_pic_offset_table = 1;
- }
-}")
-
-(define_insn "movsf_push_nomove"
+(define_insn "movsf_push"
[(set (match_operand:SF 0 "push_operand" "=<,<")
- (match_operand:SF 1 "general_operand" "gF,f"))]
- "!TARGET_MOVE"
+ (match_operand:SF 1 "general_operand" "*rfF,m"))]
+ "GET_CODE (operands[1]) != MEM || reload_in_progress || reload_completed"
"*
{
if (STACK_REG_P (operands[1]))
@@ -1119,64 +1202,62 @@
output_asm_insn (AS1 (fstp%S0,%0), xops);
else
output_asm_insn (AS1 (fst%S0,%0), xops);
+
RET;
}
- return AS1 (push%L1,%1);
+
+ return AS1 (push%L0,%1);
}")
-(define_insn "movsf_push"
- [(set (match_operand:SF 0 "push_operand" "=<,<,<,<")
- (match_operand:SF 1 "general_operand" "rF,f,m,m"))
- (clobber (match_scratch:SI 2 "=X,X,r,X"))]
+(define_insn "movsf_push_memory"
+ [(set (match_operand:SF 0 "push_operand" "=<")
+ (match_operand:SF 1 "memory_operand" "m"))]
+ "TARGET_PUSH_MEMORY"
+ "* return AS1 (push%L0,%1);")
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
""
- "*
+ "
{
- if (STACK_REG_P (operands[1]))
+ /* Don't generate memory->memory moves, go through a register */
+ if (TARGET_MOVE
+ && (reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
{
- rtx xops[3];
+ operands[1] = force_reg (SFmode, operands[1]);
+ }
- if (! STACK_TOP_P (operands[1]))
- abort ();
+ /* If we are loading a floating point constant that isn't 0 or 1
+ into a register, indicate we need the pic register loaded. This could
+ be optimized into stores of constants if the target eventually moves
+ to memory, but better safe than sorry. */
+ else if ((reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) != MEM
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && !standard_80387_constant_p (operands[1]))
+ {
+ rtx insn, note, fp_const;
- xops[0] = AT_SP (SFmode);
- xops[1] = GEN_INT (4);
- xops[2] = stack_pointer_rtx;
+ fp_const = force_const_mem (SFmode, operands[1]);
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
- output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+ insn = emit_insn (gen_rtx_SET (SFmode, operands[0], fp_const));
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
- output_asm_insn (AS1 (fstp%S0,%0), xops);
+ if (note)
+ XEXP (note, 0) = operands[1];
else
- output_asm_insn (AS1 (fst%S0,%0), xops);
- RET;
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, operands[1], REG_NOTES (insn));
}
-
- else if (GET_CODE (operands[1]) != MEM || GET_CODE (operands[2]) != REG)
- return AS1 (push%L1,%1);
-
- else
- {
- output_asm_insn (AS2 (mov%L2,%1,%2), operands);
- return AS1 (push%L2,%2);
- }
-}")
-
-;; Special memory<->memory pattern that combine will recreate from the
-;; moves to pseudos.
-(define_insn "movsf_mem"
- [(set (match_operand:SF 0 "memory_operand" "=m")
- (match_operand:SF 1 "memory_operand" "m"))
- (clobber (match_scratch:SI 2 "=&r"))]
- ""
- "*
-{
- output_asm_insn (AS2 (mov%L2,%1,%2), operands);
- return AS2 (mov%L0,%2,%0);
}")
;; For the purposes of regclass, prefer FLOAT_REGS.
-(define_insn "movsf_normal"
- [(set (match_operand:SF 0 "general_operand" "=*rfm,*rf,f,!*rm")
+(define_insn ""
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=*rfm,*rf,f,!*rm")
(match_operand:SF 1 "general_operand" "*rf,*rfm,fG,fF"))]
"(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
"*
@@ -1203,7 +1284,7 @@
if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1228,7 +1309,9 @@
/* Handle all SFmode moves not involving the 387 */
return singlemove_string (operands);
-}")
+}"
+ [(set_attr "type" "fld")])
+
(define_insn "swapsf"
[(set (match_operand:SF 0 "register_operand" "f")
@@ -1244,50 +1327,17 @@
return AS1 (fxch,%0);
}")
-(define_expand "movdf"
- [(set (match_operand:DF 0 "general_operand" "")
- (match_operand:DF 1 "general_operand" ""))]
- ""
- "
-{
- /* Special case memory->memory moves and pushes */
- if (TARGET_MOVE
- && (reload_in_progress | reload_completed) == 0
- && GET_CODE (operands[0]) == MEM
- && (GET_CODE (operands[1]) == MEM || push_operand (operands[0], DFmode)))
- {
- rtx (*genfunc) PROTO((rtx, rtx)) = (push_operand (operands[0], DFmode))
- ? gen_movdf_push
- : gen_movdf_mem;
-
- emit_insn ((*genfunc) (operands[0], operands[1]));
- DONE;
- }
-
- /* If we are loading a floating point constant that isn't 0 or 1 into a register,
- indicate we need the pic register loaded. This could be optimized into stores
- of constants if the target eventually moves to memory, but better safe than
- sorry. */
- if (flag_pic
- && GET_CODE (operands[0]) != MEM
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && !standard_80387_constant_p (operands[1]))
- {
- current_function_uses_pic_offset_table = 1;
- }
-}")
-
-(define_insn "movdf_push_nomove"
+(define_insn "movdf_push"
[(set (match_operand:DF 0 "push_operand" "=<,<")
- (match_operand:DF 1 "general_operand" "gF,f"))]
- "!TARGET_MOVE"
+ (match_operand:DF 1 "general_operand" "*rfF,o"))]
+ "GET_CODE (operands[1]) != MEM || reload_in_progress || reload_completed"
"*
{
if (STACK_REG_P (operands[1]))
{
rtx xops[3];
- xops[0] = AT_SP (SFmode);
+ xops[0] = AT_SP (DFmode);
xops[1] = GEN_INT (8);
xops[2] = stack_pointer_rtx;
@@ -1300,56 +1350,65 @@
RET;
}
- else
- return output_move_double (operands);
+
+ if (which_alternative == 1)
+ return output_move_pushmem (operands, insn, GET_MODE_SIZE (DFmode), 0, 0);
+
+ return output_move_double (operands);
}")
-(define_insn "movdf_push"
- [(set (match_operand:DF 0 "push_operand" "=<,<,<,<,<")
- (match_operand:DF 1 "general_operand" "rF,f,o,o,o"))
- (clobber (match_scratch:SI 2 "=X,X,&r,&r,X"))
- (clobber (match_scratch:SI 3 "=X,X,&r,X,X"))]
+(define_insn "movdf_push_memory"
+ [(set (match_operand:DF 0 "push_operand" "=<")
+ (match_operand:DF 1 "memory_operand" "o"))]
+ "TARGET_PUSH_MEMORY"
+ "* return output_move_pushmem (operands, insn, GET_MODE_SIZE (DFmode),0,0);")
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
""
- "*
+ "
{
- if (STACK_REG_P (operands[1]))
+ /* Don't generate memory->memory moves, go through a register */
+ if (TARGET_MOVE
+ && (reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
{
- rtx xops[3];
+ operands[1] = force_reg (DFmode, operands[1]);
+ }
- xops[0] = AT_SP (SFmode);
- xops[1] = GEN_INT (8);
- xops[2] = stack_pointer_rtx;
+ /* If we are loading a floating point constant that isn't 0 or 1 into a
+ register, indicate we need the pic register loaded. This could be
+ optimized into stores of constants if the target eventually moves to
+ memory, but better safe than sorry. */
+ else if ((reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) != MEM
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && !standard_80387_constant_p (operands[1]))
+ {
+ rtx insn, note, fp_const;
- output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+ fp_const = force_const_mem (DFmode, operands[1]);
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
- if (find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
- output_asm_insn (AS1 (fstp%Q0,%0), xops);
- else
- output_asm_insn (AS1 (fst%Q0,%0), xops);
+ insn = emit_insn (gen_rtx_SET (DFmode, operands[0], fp_const));
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- RET;
+ if (note)
+ XEXP (note, 0) = operands[1];
+ else
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, operands[1], REG_NOTES (insn));
}
-
- else if (GET_CODE (operands[1]) != MEM)
- return output_move_double (operands);
-
- else
- return output_move_pushmem (operands, insn, GET_MODE_SIZE (DFmode), 2, 4);
}")
-(define_insn "movdf_mem"
- [(set (match_operand:DF 0 "memory_operand" "=o,o")
- (match_operand:DF 1 "memory_operand" "o,o"))
- (clobber (match_scratch:SI 2 "=&r,&r"))
- (clobber (match_scratch:SI 3 "=&r,X"))]
- ""
- "* return output_move_memory (operands, insn, GET_MODE_SIZE (DFmode), 2, 4);")
-
;; For the purposes of regclass, prefer FLOAT_REGS.
-(define_insn "movdf_normal"
- [(set (match_operand:DF 0 "general_operand" "=f,fm,!*rf,!*rm")
+(define_insn ""
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,fm,!*rf,!*rm")
(match_operand:DF 1 "general_operand" "fmG,f,*rfm,*rfF"))]
- "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
+ "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM)
+ || (GET_CODE (operands[1]) != MEM)"
"*
{
int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
@@ -1374,7 +1433,7 @@
if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1399,7 +1458,10 @@
/* Handle all DFmode moves not involving the 387 */
return output_move_double (operands);
-}")
+}"
+ [(set_attr "type" "fld")])
+
+
(define_insn "swapdf"
[(set (match_operand:DF 0 "register_operand" "f")
@@ -1415,109 +1477,87 @@
return AS1 (fxch,%0);
}")
-(define_expand "movxf"
- [(set (match_operand:XF 0 "general_operand" "")
- (match_operand:XF 1 "general_operand" ""))]
- ""
- "
-{
- /* Special case memory->memory moves and pushes */
- if (TARGET_MOVE
- && (reload_in_progress | reload_completed) == 0
- && GET_CODE (operands[0]) == MEM
- && (GET_CODE (operands[1]) == MEM || push_operand (operands[0], XFmode)))
- {
- rtx (*genfunc) PROTO((rtx, rtx)) = (push_operand (operands[0], XFmode))
- ? gen_movxf_push
- : gen_movxf_mem;
-
- emit_insn ((*genfunc) (operands[0], operands[1]));
- DONE;
- }
-
- /* If we are loading a floating point constant that isn't 0 or 1 into a register,
- indicate we need the pic register loaded. This could be optimized into stores
- of constants if the target eventually moves to memory, but better safe than
- sorry. */
- if (flag_pic
- && GET_CODE (operands[0]) != MEM
- && GET_CODE (operands[1]) == CONST_DOUBLE
- && !standard_80387_constant_p (operands[1]))
- {
- current_function_uses_pic_offset_table = 1;
- }
-}")
-
-
-(define_insn "movxf_push_nomove"
+(define_insn "movxf_push"
[(set (match_operand:XF 0 "push_operand" "=<,<")
- (match_operand:XF 1 "general_operand" "gF,f"))]
- "!TARGET_MOVE"
+ (match_operand:XF 1 "general_operand" "*rfF,o"))]
+ "GET_CODE (operands[1]) != MEM || reload_in_progress || reload_completed"
"*
{
if (STACK_REG_P (operands[1]))
{
rtx xops[3];
- xops[0] = AT_SP (SFmode);
+ xops[0] = AT_SP (XFmode);
xops[1] = GEN_INT (12);
xops[2] = stack_pointer_rtx;
output_asm_insn (AS2 (sub%L2,%1,%2), xops);
+
output_asm_insn (AS1 (fstp%T0,%0), xops);
if (! find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
output_asm_insn (AS1 (fld%T0,%0), xops);
RET;
}
- else
- return output_move_double (operands);
+
+ if (which_alternative == 1)
+ return output_move_pushmem (operands, insn, GET_MODE_SIZE (XFmode), 0, 0);
+
+ return output_move_double (operands);
}")
-(define_insn "movxf_push"
- [(set (match_operand:XF 0 "push_operand" "=<,<,<,<,<")
- (match_operand:XF 1 "general_operand" "rF,f,o,o,o"))
- (clobber (match_scratch:SI 2 "=X,X,&r,&r,X"))
- (clobber (match_scratch:SI 3 "=X,X,&r,X,X"))]
+(define_insn "movxf_push_memory"
+ [(set (match_operand:XF 0 "push_operand" "=<")
+ (match_operand:XF 1 "memory_operand" "o"))]
+ "TARGET_PUSH_MEMORY"
+ "* return output_move_pushmem (operands, insn, GET_MODE_SIZE (XFmode),0,0);")
+
+(define_expand "movxf"
+ [(set (match_operand:XF 0 "general_operand" "")
+ (match_operand:XF 1 "general_operand" ""))]
""
- "*
+ "
{
- if (STACK_REG_P (operands[1]))
+ /* Don't generate memory->memory moves, go through a register */
+ if (TARGET_MOVE
+ && (reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
{
- rtx xops[3];
-
- xops[0] = AT_SP (SFmode);
- xops[1] = GEN_INT (12);
- xops[2] = stack_pointer_rtx;
+ operands[1] = force_reg (XFmode, operands[1]);
+ }
- output_asm_insn (AS2 (sub%L2,%1,%2), xops);
- output_asm_insn (AS1 (fstp%T0,%0), xops);
- if (! find_regno_note (insn, REG_DEAD, FIRST_STACK_REG))
- output_asm_insn (AS1 (fld%T0,%0), xops);
+ /* If we are loading a floating point constant that isn't 0 or 1
+ into a register, indicate we need the pic register loaded. This could
+ be optimized into stores of constants if the target eventually moves
+ to memory, but better safe than sorry. */
+ else if ((reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) != MEM
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && !standard_80387_constant_p (operands[1]))
+ {
+ rtx insn, note, fp_const;
- RET;
- }
+ fp_const = force_const_mem (XFmode, operands[1]);
+ if (flag_pic)
+ current_function_uses_pic_offset_table = 1;
- else if (GET_CODE (operands[1]) != MEM
- || GET_CODE (operands[2]) != REG)
- return output_move_double (operands);
+ insn = emit_insn (gen_rtx_SET (XFmode, operands[0], fp_const));
+ note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
- else
- return output_move_pushmem (operands, insn, GET_MODE_SIZE (XFmode), 2, 4);
+ if (note)
+ XEXP (note, 0) = operands[1];
+ else
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, operands[1], REG_NOTES (insn));
+ }
}")
-(define_insn "movxf_mem"
- [(set (match_operand:XF 0 "memory_operand" "=o,o")
- (match_operand:XF 1 "memory_operand" "o,o"))
- (clobber (match_scratch:SI 2 "=&r,&r"))
- (clobber (match_scratch:SI 3 "=&r,X"))]
- ""
- "* return output_move_memory (operands, insn, GET_MODE_SIZE (XFmode), 2, 4);")
-(define_insn "movxf_normal"
- [(set (match_operand:XF 0 "general_operand" "=f,fm,!*rf,!*rm")
+(define_insn ""
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=f,fm,!*rf,!*rm")
(match_operand:XF 1 "general_operand" "fmG,f,*rfm,*rfF"))]
- "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
+ "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM)
+ || (GET_CODE (operands[1]) != MEM)"
"*
{
int stack_top_dies = find_regno_note (insn, REG_DEAD, FIRST_STACK_REG) != 0;
@@ -1542,7 +1582,7 @@
if (STACK_TOP_P (operands[1]) && NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1570,7 +1610,7 @@
return output_move_double (operands);
}")
-(define_insn "swapxf"
+(define_insn "swapxf"
[(set (match_operand:XF 0 "register_operand" "f")
(match_operand:XF 1 "register_operand" "f"))
(set (match_dup 1)
@@ -1585,36 +1625,40 @@
}")
(define_insn ""
- [(set (match_operand:DI 0 "push_operand" "=<,<,<,<")
- (match_operand:DI 1 "general_operand" "riF,o,o,o"))
- (clobber (match_scratch:SI 2 "=X,&r,&r,X"))
- (clobber (match_scratch:SI 3 "=X,&r,X,X"))]
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (match_operand:DI 1 "general_operand" "riF"))]
""
- "*
-{
- if (GET_CODE (operands[1]) != MEM)
- return output_move_double (operands);
+ "* return output_move_double (operands);")
- else
- return output_move_pushmem (operands, insn, GET_MODE_SIZE (DImode), 2, 4);
-}")
+(define_insn ""
+ [(set (match_operand:DI 0 "push_operand" "=<")
+ (match_operand:DI 1 "memory_operand" "o"))]
+ "TARGET_PUSH_MEMORY"
+ "* return output_move_pushmem (operands, insn, GET_MODE_SIZE (DImode),0,0);")
-(define_insn "movdi"
- [(set (match_operand:DI 0 "general_operand" "=o,o,r,rm")
- (match_operand:DI 1 "general_operand" "o,o,m,riF"))
- (clobber (match_scratch:SI 2 "=&r,&r,X,X"))
- (clobber (match_scratch:SI 3 "=&r,X,X,X"))]
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "general_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
""
- "*
+ "
{
- rtx low[2], high[2], xop[6];
-
- if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
- return output_move_double (operands);
- else
- return output_move_memory (operands, insn, GET_MODE_SIZE (DImode), 2, 4);
+ /* Don't generate memory->memory moves, go through a register */
+ if (TARGET_MOVE
+ && (reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
+ {
+ operands[1] = force_reg (DImode, operands[1]);
+ }
}")
+(define_insn ""
+ [(set (match_operand:DI 0 "general_operand" "=g,r")
+ (match_operand:DI 1 "general_operand" "riF,m"))]
+ "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM)
+ || (GET_CODE (operands[1]) != MEM)"
+ "* return output_move_double (operands);")
+
;;- conversion instructions
;;- NONE
@@ -1623,21 +1667,39 @@
;; See comments by `andsi' for when andl is faster than movzx.
(define_insn "zero_extendhisi2"
- [(set (match_operand:SI 0 "general_operand" "=r")
- (zero_extend:SI
- (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ [(set (match_operand:SI 0 "register_operand" "=r,&r,?r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "0,rm,rm")))]
""
"*
-{
- if ((!TARGET_386 || REGNO (operands[0]) == 0)
+ {
+ rtx xops[2];
+
+ if ((TARGET_ZERO_EXTEND_WITH_AND || REGNO (operands[0]) == 0)
&& REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
{
- rtx xops[2];
xops[0] = operands[0];
xops[1] = GEN_INT (0xffff);
output_asm_insn (AS2 (and%L0,%1,%k0), xops);
RET;
}
+ if (TARGET_ZERO_EXTEND_WITH_AND && !reg_overlap_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn (AS2 (xor%L0,%0,%0),operands);
+ output_asm_insn (AS2 (mov%W0,%1,%w0),operands);
+ RET;
+ }
+
+ if (TARGET_ZERO_EXTEND_WITH_AND)
+ {
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xffff);
+ if (i386_aligned_p (operands[1]))
+ output_asm_insn (AS2 (mov%L0,%k1,%k0),operands);
+ else
+ output_asm_insn (AS2 (mov%W0,%1,%w0),operands);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
#ifdef INTEL_SYNTAX
return AS2 (movzx,%1,%0);
@@ -1646,23 +1708,62 @@
#endif
}")
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "")))]
+ "reload_completed && TARGET_ZERO_EXTEND_WITH_AND && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (strict_low_part (match_dup 2))
+ (match_dup 1))]
+ "operands[2] = gen_rtx_REG (HImode, true_regnum (operands[0]));")
+
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "")))]
+ "reload_completed && TARGET_ZERO_EXTEND_WITH_AND && reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 2))
+ (match_dup 1))
+ (set (match_dup 0)
+ (and:SI (match_dup 0)
+ (const_int 65535)))]
+ "operands[2] = gen_rtx_REG (HImode, true_regnum (operands[0]));")
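+
+;; (e.g. with TARGET_ZERO_EXTEND_WITH_AND, "movzwl %dx,%eax" becomes
+;; "xorl %eax,%eax ; movw %dx,%ax" when source and destination do not
+;; overlap, and a movw/movl followed by "andl $0xffff,%eax" when they do,
+;; matching the two splits above.)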
+
(define_insn "zero_extendqihi2"
- [(set (match_operand:HI 0 "general_operand" "=r")
- (zero_extend:HI
- (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ [(set (match_operand:HI 0 "register_operand" "=q,&q,?r")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "0,qm,qm")))]
""
"*
-{
- if ((!TARGET_386 || REGNO (operands[0]) == 0)
- && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+
+ if ((TARGET_ZERO_EXTEND_WITH_AND || REGNO (operands[0]) == 0)
+ && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1]))
{
- rtx xops[2];
xops[0] = operands[0];
xops[1] = GEN_INT (0xff);
output_asm_insn (AS2 (and%L0,%1,%k0), xops);
RET;
}
-
+ if (TARGET_ZERO_EXTEND_WITH_AND && QI_REG_P (operands[0]))
+ {
+ if(!reg_overlap_mentioned_p(operands[0],operands[1]))
+ {
+ output_asm_insn (AS2 (xor%L0,%k0,%k0), operands);
+ output_asm_insn (AS2 (mov%B0,%1,%b0), operands);
+ }
+ else
+ {
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (mov%B0,%1,%b0),operands);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ }
+ RET;
+ }
+
#ifdef INTEL_SYNTAX
return AS2 (movzx,%1,%0);
#else
@@ -1670,22 +1771,89 @@
#endif
}")
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "reload_completed && QI_REG_P (operands[0]) && TARGET_ZERO_EXTEND_WITH_AND
+ && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (strict_low_part (match_dup 2))
+ (match_dup 1))]
+ "operands[2] = gen_rtx_REG (QImode, REGNO (operands[0]));")
+
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "memory_operand" "")))]
+ "reload_completed && QI_REG_P (operands[0]) && TARGET_ZERO_EXTEND_WITH_AND
+ && reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 2))
+ (match_dup 1))
+ (set (match_dup 0)
+ (and:HI (match_dup 0)
+ (const_int 255)))]
+ "operands[2] = gen_rtx_REG (QImode, REGNO (operands[0]));")
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed && TARGET_ZERO_EXTEND_WITH_AND"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (and:HI (match_dup 0)
+ (const_int 255)))]
+ "if (GET_CODE (operands[1]) == SUBREG && SUBREG_WORD (operands[1]) == 0)
+ operands[1] = SUBREG_REG (operands[1]);
+ if (GET_CODE (operands[0]) != REG || GET_CODE (operands[1]) != REG
+ || REGNO (operands[0]) == REGNO (operands[1]))
+ FAIL;
+ operands[2] = gen_rtx_REG (HImode, REGNO (operands[1]));")
+
(define_insn "zero_extendqisi2"
- [(set (match_operand:SI 0 "general_operand" "=r")
- (zero_extend:SI
- (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ [(set (match_operand:SI 0 "register_operand" "=q,&q,?r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "0,qm,qm")))]
""
"*
-{
- if ((!TARGET_386 || REGNO (operands[0]) == 0)
- && REG_P (operands[1]) && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ rtx xops[2];
+
+ if ((TARGET_ZERO_EXTEND_WITH_AND || REGNO (operands[0]) == 0)
+ && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1]))
{
- rtx xops[2];
xops[0] = operands[0];
xops[1] = GEN_INT (0xff);
output_asm_insn (AS2 (and%L0,%1,%k0), xops);
RET;
}
+ if (TARGET_ZERO_EXTEND_WITH_AND && QI_REG_P (operands[0]))
+ {
+ if(!reg_overlap_mentioned_p (operands[0], operands[1]))
+ {
+ output_asm_insn (AS2 (xor%L0,%0,%0),operands);
+ output_asm_insn (AS2 (mov%B0,%1,%b0),operands);
+ }
+ else
+ {
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ output_asm_insn (AS2 (mov%B0,%1,%b0), operands);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ }
+ RET;
+ }
+
+ if (TARGET_ZERO_EXTEND_WITH_AND && GET_CODE (operands[1]) == REG)
+ {
+ xops[0] = operands[0];
+ xops[1] = GEN_INT (0xff);
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[1]));
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+ output_asm_insn (AS2 (and%L0,%1,%k0), xops);
+ RET;
+ }
#ifdef INTEL_SYNTAX
return AS2 (movzx,%1,%0);
@@ -1694,23 +1862,77 @@
#endif
}")
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "")))]
+ "reload_completed && QI_REG_P (operands[0]) && TARGET_ZERO_EXTEND_WITH_AND
+ && !reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0)
+ (const_int 0))
+ (set (strict_low_part (match_dup 2))
+ (match_dup 1))]
+ "operands[2] = gen_rtx_REG (QImode, REGNO (operands[0]));")
+
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "memory_operand" "")))]
+ "reload_completed && QI_REG_P (operands[0]) && TARGET_ZERO_EXTEND_WITH_AND
+ && reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (strict_low_part (match_dup 2))
+ (match_dup 1))
+ (set (match_dup 0)
+ (and:SI (match_dup 0)
+ (const_int 255)))]
+ "operands[2] = gen_rtx_REG (QImode, REGNO (operands[0]));")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ "reload_completed && TARGET_ZERO_EXTEND_WITH_AND
+ && ! reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (and:SI (match_dup 0)
+ (const_int 255)))]
+ "operands[2] = gen_rtx_REG (SImode, true_regnum (operands[1]));")
+
(define_insn "zero_extendsidi2"
- [(set (match_operand:DI 0 "register_operand" "=r")
- (zero_extend:DI
- (match_operand:SI 1 "register_operand" "0")))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,?r,?m")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "0,rm,r")))]
""
"*
-{
- operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
- return AS2 (xor%L0,%0,%0);
+ {
+ rtx high[2], low[2], xops[4];
+
+ if (REG_P (operands[0]) && REG_P (operands[1])
+ && REGNO (operands[0]) == REGNO (operands[1]))
+ {
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
+ return AS2 (xor%L0,%0,%0);
+ }
+
+ split_di (operands, 1, low, high);
+ xops[0] = low[0];
+ xops[1] = operands[1];
+ xops[2] = high[0];
+ xops[3] = const0_rtx;
+
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+ if (GET_CODE (low[0]) == MEM)
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ else
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+
+ RET;
}")
;;- sign extension instructions
(define_insn "extendsidi2"
[(set (match_operand:DI 0 "register_operand" "=r")
- (sign_extend:DI
- (match_operand:SI 1 "register_operand" "0")))]
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "0")))]
""
"*
{
@@ -1724,7 +1946,7 @@
#endif
}
- operands[1] = gen_rtx (REG, SImode, REGNO (operands[0]) + 1);
+ operands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
output_asm_insn (AS2 (mov%L0,%0,%1), operands);
operands[0] = GEN_INT (31);
@@ -1736,9 +1958,8 @@
;; We use what the Unix assembler expects.
(define_insn "extendhisi2"
- [(set (match_operand:SI 0 "general_operand" "=r")
- (sign_extend:SI
- (match_operand:HI 1 "nonimmediate_operand" "rm")))]
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "rm")))]
""
"*
{
@@ -1758,9 +1979,8 @@
}")
(define_insn "extendqihi2"
- [(set (match_operand:HI 0 "general_operand" "=r")
- (sign_extend:HI
- (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
""
"*
{
@@ -1776,9 +1996,8 @@
}")
(define_insn "extendqisi2"
- [(set (match_operand:SI 0 "general_operand" "=r")
- (sign_extend:SI
- (match_operand:QI 1 "nonimmediate_operand" "qm")))]
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm")))]
""
"*
{
@@ -1788,13 +2007,72 @@
return AS2 (movs%B0%L0,%1,%0);
#endif
}")
+
+
+;; Truncation of long long -> 32 bit
+
+(define_expand "truncdisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
+ (truncate:SI (match_operand:DI 1 "nonimmediate_operand" "ro,r")))]
+ ""
+ "
+{
+ /* Don't generate memory->memory moves, go through a register */
+ if (TARGET_MOVE
+ && (reload_in_progress | reload_completed) == 0
+ && GET_CODE (operands[0]) == MEM
+ && GET_CODE (operands[1]) == MEM)
+ {
+ rtx target = gen_reg_rtx (SImode);
+ emit_insn (gen_truncdisi2 (target, operands[1]));
+ emit_move_insn (operands[0], target);
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
+ (truncate:SI (match_operand:DI 1 "nonimmediate_operand" "ro,r")))]
+ "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
+ "*
+{
+ rtx low[2], high[2], xops[2];
+
+ split_di (&operands[1], 1, low, high);
+ xops[0] = operands[0];
+ xops[1] = low[0];
+ if (!rtx_equal_p (xops[0], xops[1]))
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
+ RET;
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,m")
+ (truncate:SI (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "ro,r")
+ (const_int 32))))]
+ "(!TARGET_MOVE || GET_CODE (operands[0]) != MEM) || (GET_CODE (operands[1]) != MEM)"
+ "*
+{
+ rtx low[2], high[2], xops[2];
+
+ split_di (&operands[1], 1, low, high);
+ xops[0] = operands[0];
+ xops[1] = high[0];
+ if (!rtx_equal_p (xops[0], xops[1]))
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
+ RET;
+}")
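+
+;; (Sketch: after split_di, truncating DImode to SImode is just the low
+;; 32-bit half, and (truncate:SI (lshiftrt:DI ... (const_int 32))) is the
+;; high half, so each pattern above emits at most a single movl, or nothing
+;; when the half already lives in the destination.)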
+
+
;; Conversions between float and double.
(define_insn "extendsfdf2"
- [(set (match_operand:DF 0 "general_operand" "=fm,f")
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=fm,f")
(float_extend:DF
- (match_operand:SF 1 "general_operand" "f,fm")))]
+ (match_operand:SF 1 "nonimmediate_operand" "f,fm")))]
"TARGET_80387"
"*
{
@@ -1808,7 +2086,7 @@
if (NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1827,9 +2105,9 @@
}")
(define_insn "extenddfxf2"
- [(set (match_operand:XF 0 "general_operand" "=fm,f,f,!*r")
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f,f,!*r")
(float_extend:XF
- (match_operand:DF 1 "general_operand" "f,fm,!*r,f")))]
+ (match_operand:DF 1 "nonimmediate_operand" "f,fm,!*r,f")))]
"TARGET_80387"
"*
{
@@ -1843,7 +2121,7 @@
if (NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1862,9 +2140,9 @@
}")
(define_insn "extendsfxf2"
- [(set (match_operand:XF 0 "general_operand" "=fm,f,f,!*r")
+ [(set (match_operand:XF 0 "nonimmediate_operand" "=fm,f,f,!*r")
(float_extend:XF
- (match_operand:SF 1 "general_operand" "f,fm,!*r,f")))]
+ (match_operand:SF 1 "nonimmediate_operand" "f,fm,!*r,f")))]
"TARGET_80387"
"*
{
@@ -1878,7 +2156,7 @@
if (NON_STACK_REG_P (operands[0]))
{
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
@@ -1939,7 +2217,7 @@
}")
(define_insn "truncxfsf2"
- [(set (match_operand:SF 0 "general_operand" "=m,!*r")
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=m,!*r")
(float_truncate:SF
(match_operand:XF 1 "register_operand" "f,f")))]
"TARGET_80387"
@@ -1954,7 +2232,7 @@
output_asm_insn (AS1 (fld,%y1), operands);
stack_top_dies = 1;
}
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
else if (GET_CODE (operands[0]) == MEM)
@@ -1972,7 +2250,7 @@
}")
(define_insn "truncxfdf2"
- [(set (match_operand:DF 0 "general_operand" "=m,!*r")
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=m,!*r")
(float_truncate:DF
(match_operand:XF 1 "register_operand" "f,f")))]
"TARGET_80387"
@@ -1987,7 +2265,7 @@
output_asm_insn (AS1 (fld,%y1), operands);
stack_top_dies = 1;
}
- output_to_reg (operands[0], stack_top_dies);
+ output_to_reg (operands[0], stack_top_dies, 0);
RET;
}
else if (GET_CODE (operands[0]) == MEM)
@@ -2029,7 +2307,7 @@
operands[3] = gen_lowpart (SImode, operands[2]);
operands[4] = gen_reg_rtx (XFmode);
operands[5] = (rtx) assign_386_stack_local (SImode, 0);
- operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[6] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fixuns_truncdfsi2"
@@ -2050,7 +2328,7 @@
operands[3] = gen_lowpart (SImode, operands[2]);
operands[4] = gen_reg_rtx (DFmode);
operands[5] = (rtx) assign_386_stack_local (SImode, 0);
- operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[6] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fixuns_truncsfsi2"
@@ -2071,7 +2349,7 @@
operands[3] = gen_lowpart (SImode, operands[2]);
operands[4] = gen_reg_rtx (SFmode);
operands[5] = (rtx) assign_386_stack_local (SImode, 0);
- operands[6] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[6] = (rtx) assign_386_stack_local (DImode, 1);
}")
;; Signed conversion to DImode.
@@ -2091,7 +2369,7 @@
operands[1] = copy_to_mode_reg (XFmode, operands[1]);
operands[2] = gen_reg_rtx (XFmode);
operands[3] = (rtx) assign_386_stack_local (SImode, 0);
- operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[4] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fix_truncdfdi2"
@@ -2109,7 +2387,7 @@
operands[1] = copy_to_mode_reg (DFmode, operands[1]);
operands[2] = gen_reg_rtx (DFmode);
operands[3] = (rtx) assign_386_stack_local (SImode, 0);
- operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[4] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fix_truncsfdi2"
@@ -2127,37 +2405,37 @@
operands[1] = copy_to_mode_reg (SFmode, operands[1]);
operands[2] = gen_reg_rtx (SFmode);
operands[3] = (rtx) assign_386_stack_local (SImode, 0);
- operands[4] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[4] = (rtx) assign_386_stack_local (DImode, 1);
}")
;; These match a signed conversion of either DFmode or SFmode to DImode.
(define_insn ""
- [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:XF (match_operand:XF 1 "register_operand" "f"))))
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (fix:DI (fix:XF (match_operand:XF 1 "register_operand" "+f"))))
(clobber (match_dup 1))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
(define_insn ""
- [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "+f"))))
(clobber (match_dup 1))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
(define_insn ""
- [(set (match_operand:DI 0 "general_operand" "=rm")
- (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=rm")
+ (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "+f"))))
(clobber (match_dup 1))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
@@ -2175,7 +2453,7 @@
"
{
operands[2] = (rtx) assign_386_stack_local (SImode, 0);
- operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[3] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fix_truncdfsi2"
@@ -2189,7 +2467,7 @@
"
{
operands[2] = (rtx) assign_386_stack_local (SImode, 0);
- operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[3] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_expand "fix_truncsfsi2"
@@ -2203,32 +2481,32 @@
"
{
operands[2] = (rtx) assign_386_stack_local (SImode, 0);
- operands[3] = (rtx) assign_386_stack_local (SImode, 1);
+ operands[3] = (rtx) assign_386_stack_local (DImode, 1);
}")
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=rm")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(fix:SI (fix:XF (match_operand:XF 1 "register_operand" "f"))))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=rm")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(fix:SI (fix:DF (match_operand:DF 1 "register_operand" "f"))))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=rm")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))
(clobber (match_operand:SI 2 "memory_operand" "m"))
- (clobber (match_operand:SI 3 "memory_operand" "m"))
+ (clobber (match_operand:DI 3 "memory_operand" "m"))
(clobber (match_scratch:SI 4 "=&q"))]
"TARGET_80387"
"* return output_fix_trunc (insn, operands);")
@@ -2271,14 +2549,14 @@
(define_expand "floatdixf2"
[(set (match_operand:XF 0 "register_operand" "")
(float:XF (match_operand:DI 1 "nonimmediate_operand" "")))]
- "TARGET_80387"
+ "TARGET_80387 && LONG_DOUBLE_TYPE_SIZE == 96"
"")
;; This will convert from SImode or DImode to MODE_FLOAT.
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
- (float:XF (match_operand:DI 1 "general_operand" "rm")))]
+ (float:XF (match_operand:DI 1 "nonimmediate_operand" "rm")))]
"TARGET_80387"
"*
{
@@ -2346,7 +2624,7 @@
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f,f")
- (float:XF (match_operand:SI 1 "general_operand" "m,!*r")))]
+ (float:XF (match_operand:SI 1 "nonimmediate_operand" "m,!*r")))]
"TARGET_80387"
"*
{
@@ -2380,11 +2658,137 @@
;;- add instructions
+(define_insn "addsidi3_1"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=&r,r,o,!&r,!r,o,!o")
+ (plus:DI (match_operand:DI 1 "general_operand" "0,0,0,o,riF,riF,o")
+ (zero_extend:DI (match_operand:SI 2 "general_operand" "o,ri,ri,roi,roi,ri,ri"))))
+ (clobber (match_scratch:SI 3 "=X,X,X,X,X,X,&r"))]
+ ""
+ "*
+{
+ rtx low[3], high[3], xops[7], temp;
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 2, low, high);
+ high[2] = const0_rtx;
+ low[2] = operands[2];
+
+ if (!rtx_equal_p (operands[0], operands[1]))
+ {
+ xops[0] = high[0];
+ xops[1] = low[0];
+ xops[2] = high[1];
+ xops[3] = low[1];
+
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ {
+ output_asm_insn (AS2 (mov%L1,%3,%1), xops);
+ output_asm_insn (AS2 (mov%L0,%2,%0), xops);
+ }
+ else
+ {
+ xops[4] = high[2];
+ xops[5] = low[2];
+ xops[6] = operands[3];
+ output_asm_insn (AS2 (mov%L6,%3,%6), xops);
+ output_asm_insn (AS2 (add%L6,%5,%6), xops);
+ output_asm_insn (AS2 (mov%L1,%6,%1), xops);
+ output_asm_insn (AS2 (mov%L6,%2,%6), xops);
+ output_asm_insn (AS2 (adc%L6,%4,%6), xops);
+ output_asm_insn (AS2 (mov%L0,%6,%0), xops);
+ RET;
+ }
+ }
+
+ output_asm_insn (AS2 (add%L0,%2,%0), low);
+ output_asm_insn (AS2 (adc%L0,%2,%0), high);
+ RET;
+}")
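+
+;; (e.g. in the common in-place case the pattern above reduces to
+;;     addl %esi,%eax
+;;     adcl $0,%edx
+;; with only the carry propagated into the high half, since high[2] is
+;; const0_rtx.)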
+
+(define_insn "addsidi3_2"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=&r,r,o,&r,!&r,&r,o,o,!o")
+ (plus:DI (zero_extend:DI (match_operand:SI 2 "general_operand" "o,ri,ri,o,o,ri,ri,i,r"))
+ (match_operand:DI 1 "general_operand" "0,0,0,iF,ro,roiF,riF,o,o")))
+ (clobber (match_scratch:SI 3 "=X,X,X,X,X,X,X,&r,&r"))]
+ ""
+ "*
+{
+ rtx low[3], high[3], xops[7], temp;
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 2, low, high);
+ high[2] = const0_rtx;
+ low[2] = operands[2];
+
+ if (!rtx_equal_p (operands[0], operands[1]))
+ {
+ xops[0] = high[0];
+ xops[1] = low[0];
+ xops[2] = high[1];
+ xops[3] = low[1];
+
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ {
+ if (rtx_equal_p (low[0], operands[2]))
+ {
+ output_asm_insn (AS2 (mov%L0,%2,%0), high);
+ output_asm_insn (AS2 (add%L0,%1,%0), low);
+ output_asm_insn (AS2 (adc%L0,%1,%0), high);
+ RET;
+ }
+ if (rtx_equal_p (high[0], operands[2]))
+ {
+ if (GET_CODE (operands[0]) != MEM)
+ {
+ output_asm_insn (AS2 (mov%L0,%2,%0), low);
+ output_asm_insn (AS2 (mov%L0,%2,%0), high);
+ output_asm_insn (AS2 (add%L0,%1,%0), low);
+ output_asm_insn (AS2 (adc%L0,%1,%0), high);
+ }
+ else
+ {
+ /* It's too late to ask for a scratch now - but this
+ will probably not happen too often. */
+ output_asm_insn (AS2 (add%L1,%2,%1), low);
+ output_asm_insn (AS2 (mov%L0,%1,%0), low);
+ output_asm_insn (AS2 (mov%L1,%2,%1), low);
+ output_asm_insn (AS2 (mov%L0,%2,%0), high);
+ output_asm_insn (AS2 (adc%L0,%1,%0), high);
+ output_asm_insn (AS2 (sub%L1,%0,%1), low);
+ output_asm_insn (AS1 (neg%L1,%1), low);
+ }
+ RET;
+ }
+ output_asm_insn (AS2 (mov%L1,%3,%1), xops);
+ output_asm_insn (AS2 (mov%L0,%2,%0), xops);
+ }
+ else
+ {
+ xops[4] = high[2];
+ xops[5] = low[2];
+ xops[6] = operands[3];
+ output_asm_insn (AS2 (mov%L6,%3,%6), xops);
+ output_asm_insn (AS2 (add%L6,%5,%6), xops);
+ output_asm_insn (AS2 (mov%L1,%6,%1), xops);
+ output_asm_insn (AS2 (mov%L6,%2,%6), xops);
+ output_asm_insn (AS2 (adc%L6,%4,%6), xops);
+ output_asm_insn (AS2 (mov%L0,%6,%0), xops);
+ RET;
+ }
+ }
+
+ output_asm_insn (AS2 (add%L0,%2,%0), low);
+ output_asm_insn (AS2 (adc%L0,%2,%0), high);
+ RET;
+}")
+
(define_insn "adddi3"
- [(set (match_operand:DI 0 "general_operand" "=&r,ro,o,&r,ro,o,&r,o,o,o")
- (plus:DI (match_operand:DI 1 "general_operand" "%0,0,0,o,riF,o,or,riF,riF,o")
- (match_operand:DI 2 "general_operand" "o,riF,o,0,0,0,oriF,riF,o,o")))
- (clobber (match_scratch:SI 3 "=X,X,&r,X,&r,&r,X,&r,&r,&r"))]
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,!r,o,!&r,!o,!o")
+ (plus:DI (match_operand:DI 1 "general_operand" "%0,0,0,0iF,or,riF,o")
+ (match_operand:DI 2 "general_operand" "o,riF,0,or,or,oriF,o")))
+ (clobber (match_scratch:SI 3 "=X,X,X,&r,X,&r,&r"))]
""
"*
{
@@ -2456,14 +2860,23 @@
;; On a 486, it is faster to do movl/addl than to do a single leal if
;; operands[1] and operands[2] are both registers.
-(define_insn "addsi3"
- [(set (match_operand:SI 0 "general_operand" "=?r,rm,r")
- (plus:SI (match_operand:SI 1 "general_operand" "%r,0,0")
- (match_operand:SI 2 "general_operand" "ri,ri,rm")))]
+(define_expand "addsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
""
+ "IX86_EXPAND_BINARY_OPERATOR (PLUS, SImode, operands);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm,r")
+ (plus:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0,r")
+ (match_operand:SI 2 "general_operand" "rmi,ri,ri")))]
+ "ix86_binary_operator_ok (PLUS, SImode, operands)"
"*
{
- if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
+ if (REG_P (operands[0]) && REG_P (operands[1])
+ && (REG_P (operands[2]) || GET_CODE (operands[2]) == CONST_INT)
+ && REGNO (operands[0]) != REGNO (operands[1]))
{
if (REG_P (operands[2]) && REGNO (operands[0]) == REGNO (operands[2]))
return AS2 (add%L0,%1,%0);
@@ -2483,34 +2896,85 @@
operands[1] = SET_SRC (PATTERN (insn));
return AS2 (lea%L0,%a1,%0);
}
-
- output_asm_insn (AS2 (mov%L0,%1,%0), operands);
}
+ if (!rtx_equal_p (operands[0], operands[1]))
+ output_asm_insn (AS2 (mov%L0,%1,%0), operands);
+
if (operands[2] == const1_rtx)
return AS1 (inc%L0,%0);
if (operands[2] == constm1_rtx)
return AS1 (dec%L0,%0);
+ /* subl $-128,%ebx is smaller than addl $128,%ebx. */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && INTVAL (operands[2]) == 128)
+ {
+ /* This doesn't compute the carry bit in the same way
+ * as add%L0, but we use inc and dec above and they
+ * don't set the carry bit at all. If inc/dec don't need
+ * a CC_STATUS_INIT, this doesn't either... */
+ operands[2] = GEN_INT (-128);
+ return AS2 (sub%L0,%2,%0);
+ }
+
return AS2 (add%L0,%2,%0);
}")
+;; addsi3 is faster, so put this after.
+
+(define_insn "movsi_lea"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:QI 1 "address_operand" "p"))]
+ ""
+ "*
+{
+ /* Adding a constant to a register is faster with an add. */
+ /* ??? can this ever happen? */
+ if (GET_CODE (operands[1]) == PLUS
+ && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
+ && rtx_equal_p (operands[0], XEXP (operands[1], 0)))
+ {
+ operands[1] = XEXP (operands[1], 1);
+
+ if (operands[1] == const1_rtx)
+ return AS1 (inc%L0,%0);
+
+ if (operands[1] == constm1_rtx)
+ return AS1 (dec%L0,%0);
+
+ return AS2 (add%L0,%1,%0);
+ }
+
+ CC_STATUS_INIT;
+ return AS2 (lea%L0,%a1,%0);
+}")
+
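
movsi_lea above turns an address computation into a single leal, falling back to add/inc/dec when the address is just the destination plus a constant. A sketch of the value leal produces, assuming the usual base + index*scale + displacement form; the function name is illustrative:

    #include <stdint.h>

    /* What leal computes: base + index * scale + displacement, stored
       to the destination without touching the flags; scale is 1, 2, 4
       or 8.  When this collapses to "dest + constant", the pattern
       prefers add/inc/dec, as its comment notes.  */
    uint32_t
    lea_value (uint32_t base, uint32_t index, uint32_t scale, int32_t disp)
    {
      return base + index * scale + (uint32_t) disp;
    }
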
;; ??? `lea' here, for three operand add? If leaw is used, only %bx,
;; %si and %di can appear in SET_SRC, and output_asm_insn might not be
;; able to handle the operand. But leal always works?
-(define_insn "addhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm,r")
- (plus:HI (match_operand:HI 1 "general_operand" "%0,0")
- (match_operand:HI 2 "general_operand" "ri,rm")))]
+(define_expand "addhi3"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
""
+ "IX86_EXPAND_BINARY_OPERATOR (PLUS, HImode, operands);")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (plus:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:HI 2 "general_operand" "ri,rm")))]
+ "ix86_binary_operator_ok (PLUS, HImode, operands)"
"*
{
/* ??? what about offsettable memory references? */
- if (QI_REG_P (operands[0])
+ if (!TARGET_PENTIUMPRO /* partial stalls are just too painful to risk. */
+ && QI_REG_P (operands[0])
&& GET_CODE (operands[2]) == CONST_INT
- && (INTVAL (operands[2]) & 0xff) == 0)
+ && (INTVAL (operands[2]) & 0xff) == 0
+ && i386_cc_probably_useless_p (insn))
{
int byteval = (INTVAL (operands[2]) >> 8) & 0xff;
CC_STATUS_INIT;
@@ -2524,6 +2988,28 @@
return AS2 (add%B0,%2,%h0);
}
+ /* Use a 32-bit operation when possible, to avoid the prefix penalty. */
+ if (REG_P (operands[0])
+ && i386_aligned_p (operands[2])
+ && i386_cc_probably_useless_p (insn))
+ {
+ CC_STATUS_INIT;
+
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ HOST_WIDE_INT intval = 0xffff & INTVAL (operands[2]);
+
+ if (intval == 1)
+ return AS1 (inc%L0,%k0);
+
+ if (intval == 0xffff)
+ return AS1 (dec%L0,%k0);
+
+ operands[2] = i386_sext16_if_const (operands[2]);
+ }
+ return AS2 (add%L0,%k2,%k0);
+ }
+
if (operands[2] == const1_rtx)
return AS1 (inc%W0,%0);
@@ -2535,11 +3021,18 @@
return AS2 (add%W0,%2,%0);
}")
-(define_insn "addqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm,q")
- (plus:QI (match_operand:QI 1 "general_operand" "%0,0")
- (match_operand:QI 2 "general_operand" "qn,qmn")))]
+(define_expand "addqi3"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (plus:QI (match_operand:QI 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
""
+ "IX86_EXPAND_BINARY_OPERATOR (PLUS, QImode, operands);")
+
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (plus:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:QI 2 "general_operand" "qn,qmn")))]
+ "ix86_binary_operator_ok (PLUS, QImode, operands)"
"*
{
if (operands[2] == const1_rtx)
@@ -2563,8 +3056,8 @@
;
;(define_insn ""
; [(set (match_operand:SI 0 "push_operand" "=<")
-; (plus:SI (match_operand:SI 1 "general_operand" "%r")
-; (match_operand:SI 2 "general_operand" "ri")))]
+; (plus:SI (match_operand:SI 1 "register_operand" "%r")
+; (match_operand:SI 2 "nonmemory_operand" "ri")))]
; ""
; "*
;{
@@ -2572,46 +3065,18 @@
; xops[0] = operands[0];
; xops[1] = operands[1];
; xops[2] = operands[2];
-; xops[3] = gen_rtx (MEM, SImode, stack_pointer_rtx);
+; xops[3] = gen_rtx_MEM (SImode, stack_pointer_rtx);
; output_asm_insn (\"push%z1 %1\", xops);
; output_asm_insn (AS2 (add%z3,%2,%3), xops);
; RET;
;}")
-;; addsi3 is faster, so put this after.
-
-(define_insn "movsi_lea"
- [(set (match_operand:SI 0 "register_operand" "=r")
- (match_operand:QI 1 "address_operand" "p"))]
- ""
- "*
-{
- CC_STATUS_INIT;
- /* Adding a constant to a register is faster with an add. */
- /* ??? can this ever happen? */
- if (GET_CODE (operands[1]) == PLUS
- && GET_CODE (XEXP (operands[1], 1)) == CONST_INT
- && rtx_equal_p (operands[0], XEXP (operands[1], 0)))
- {
- operands[1] = XEXP (operands[1], 1);
-
- if (operands[1] == const1_rtx)
- return AS1 (inc%L0,%0);
-
- if (operands[1] == constm1_rtx)
- return AS1 (dec%L0,%0);
-
- return AS2 (add%L0,%1,%0);
- }
- return AS2 (lea%L0,%a1,%0);
-}")
-
;; The patterns that match these are at the end of this file.
(define_expand "addxf3"
[(set (match_operand:XF 0 "register_operand" "")
- (plus:XF (match_operand:XF 1 "nonimmediate_operand" "")
- (match_operand:XF 2 "nonimmediate_operand" "")))]
+ (plus:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
"TARGET_80387"
"")
@@ -2631,11 +3096,59 @@
;;- subtract instructions
+(define_insn "subsidi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,&r,!&r,o,o,!o")
+ (minus:DI (match_operand:DI 1 "general_operand" "0iF,0,roiF,roiF,riF,o,o")
+ (zero_extend:DI (match_operand:SI 2 "general_operand" "o,ri,ri,o,ri,i,r"))))
+ (clobber (match_scratch:SI 3 "=X,X,X,X,X,&r,&r"))]
+ ""
+ "*
+{
+ rtx low[3], high[3], xops[7];
+
+ CC_STATUS_INIT;
+
+ split_di (operands, 2, low, high);
+ high[2] = const0_rtx;
+ low[2] = operands[2];
+
+ if (!rtx_equal_p (operands[0], operands[1]))
+ {
+ xops[0] = high[0];
+ xops[1] = low[0];
+ xops[2] = high[1];
+ xops[3] = low[1];
+
+ if (GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) != MEM)
+ {
+ output_asm_insn (AS2 (mov%L1,%3,%1), xops);
+ output_asm_insn (AS2 (mov%L0,%2,%0), xops);
+ }
+ else
+ {
+ xops[4] = high[2];
+ xops[5] = low[2];
+ xops[6] = operands[3];
+ output_asm_insn (AS2 (mov%L6,%3,%6), xops);
+ output_asm_insn (AS2 (sub%L6,%5,%6), xops);
+ output_asm_insn (AS2 (mov%L1,%6,%1), xops);
+ output_asm_insn (AS2 (mov%L6,%2,%6), xops);
+ output_asm_insn (AS2 (sbb%L6,%4,%6), xops);
+ output_asm_insn (AS2 (mov%L0,%6,%0), xops);
+ RET;
+ }
+ }
+
+ output_asm_insn (AS2 (sub%L0,%2,%0), low);
+ output_asm_insn (AS2 (sbb%L0,%2,%0), high);
+ RET;
+}")
+
(define_insn "subdi3"
- [(set (match_operand:DI 0 "general_operand" "=&r,ro,&r,o,o")
- (minus:DI (match_operand:DI 1 "general_operand" "0,0,roiF,riF,o")
- (match_operand:DI 2 "general_operand" "o,riF,roiF,riF,o")))
- (clobber (match_scratch:SI 3 "=X,X,X,&r,&r"))]
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,o,o,!&r,!o")
+ (minus:DI (match_operand:DI 1 "general_operand" "0,0,0iF,or,roiF,roiF")
+ (match_operand:DI 2 "general_operand" "or,riF,or,iF,roiF,roiF")))
+ (clobber (match_scratch:SI 3 "=X,X,&r,&r,X,&r"))]
""
"*
{
@@ -2698,33 +3211,65 @@
RET;
}")
-(define_insn "subsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm,r")
- (minus:SI (match_operand:SI 1 "general_operand" "0,0")
- (match_operand:SI 2 "general_operand" "ri,rm")))]
+(define_expand "subsi3"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "")
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "")
+ (match_operand:SI 2 "general_operand" "")))]
""
+ "IX86_EXPAND_BINARY_OPERATOR (MINUS, SImode, operands);")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (minus:SI (match_operand:SI 1 "nonimmediate_operand" "0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))]
+ "ix86_binary_operator_ok (MINUS, SImode, operands)"
"* return AS2 (sub%L0,%2,%0);")
-(define_insn "subhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm,r")
- (minus:HI (match_operand:HI 1 "general_operand" "0,0")
+(define_expand "subhi3"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "")
+ (match_operand:HI 2 "general_operand" "")))]
+ ""
+ "IX86_EXPAND_BINARY_OPERATOR (MINUS, HImode, operands);")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (minus:HI (match_operand:HI 1 "nonimmediate_operand" "0,0")
(match_operand:HI 2 "general_operand" "ri,rm")))]
+ "ix86_binary_operator_ok (MINUS, HImode, operands)"
+ "*
+{
+ if (REG_P (operands[0])
+ && i386_aligned_p (operands[2])
+ && i386_cc_probably_useless_p (insn))
+ {
+ CC_STATUS_INIT;
+ operands[2] = i386_sext16_if_const (operands[2]);
+ return AS2 (sub%L0,%k2,%k0);
+ }
+ return AS2 (sub%W0,%2,%0);
+}")
+
+(define_expand "subqi3"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (minus:QI (match_operand:QI 1 "general_operand" "")
+ (match_operand:QI 2 "general_operand" "")))]
""
- "* return AS2 (sub%W0,%2,%0);")
+ "IX86_EXPAND_BINARY_OPERATOR (MINUS, QImode, operands);")
-(define_insn "subqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm,q")
- (minus:QI (match_operand:QI 1 "general_operand" "0,0")
+(define_insn ""
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (minus:QI (match_operand:QI 1 "nonimmediate_operand" "0,0")
(match_operand:QI 2 "general_operand" "qn,qmn")))]
- ""
+ "ix86_binary_operator_ok (MINUS, QImode, operands)"
"* return AS2 (sub%B0,%2,%0);")
;; The patterns that match these are at the end of this file.
(define_expand "subxf3"
[(set (match_operand:XF 0 "register_operand" "")
- (minus:XF (match_operand:XF 1 "nonimmediate_operand" "")
- (match_operand:XF 2 "nonimmediate_operand" "")))]
+ (minus:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
"TARGET_80387"
"")
@@ -2745,22 +3290,15 @@
;;- multiply instructions
;(define_insn "mulqi3"
-; [(set (match_operand:QI 0 "general_operand" "=a")
-; (mult:QI (match_operand:QI 1 "general_operand" "%0")
-; (match_operand:QI 2 "general_operand" "qm")))]
+; [(set (match_operand:QI 0 "register_operand" "=a")
+; (mult:QI (match_operand:QI 1 "register_operand" "%0")
+; (match_operand:QI 2 "nonimmediate_operand" "qm")))]
; ""
; "imul%B0 %2,%0")
-(define_insn ""
- [(set (match_operand:HI 0 "general_operand" "=r")
- (mult:HI (match_operand:HI 1 "general_operand" "%0")
- (match_operand:HI 2 "general_operand" "r")))]
- "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80"
- "* return AS2 (imul%W0,%2,%0);")
-
(define_insn "mulhi3"
- [(set (match_operand:HI 0 "general_operand" "=r,r")
- (mult:HI (match_operand:HI 1 "general_operand" "%0,rm")
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (mult:HI (match_operand:HI 1 "nonimmediate_operand" "%0,rm")
(match_operand:HI 2 "general_operand" "g,i")))]
""
"*
@@ -2771,18 +3309,12 @@
/* Assembler has weird restrictions. */
return AS2 (imul%W0,%2,%0);
return AS3 (imul%W0,%2,%1,%0);
-}")
-
-(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=r")
- (mult:SI (match_operand:SI 1 "general_operand" "%0")
- (match_operand:SI 2 "general_operand" "r")))]
- "GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) == 0x80"
- "* return AS2 (imul%L0,%2,%0);")
+}"
+ [(set_attr "type" "imul")])
(define_insn "mulsi3"
- [(set (match_operand:SI 0 "general_operand" "=r,r")
- (mult:SI (match_operand:SI 1 "general_operand" "%0,rm")
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (mult:SI (match_operand:SI 1 "nonimmediate_operand" "%0,rm")
(match_operand:SI 2 "general_operand" "g,i")))]
""
"*
@@ -2793,35 +3325,40 @@
/* Assembler has weird restrictions. */
return AS2 (imul%L0,%2,%0);
return AS3 (imul%L0,%2,%1,%0);
-}")
+}"
+ [(set_attr "type" "imul")])
(define_insn "umulqihi3"
- [(set (match_operand:HI 0 "general_operand" "=a")
- (mult:HI (zero_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (mult:HI (zero_extend:HI (match_operand:QI 1 "register_operand" "%0"))
(zero_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))]
""
- "mul%B0 %2")
+ "mul%B0 %2"
+ [(set_attr "type" "imul")])
(define_insn "mulqihi3"
- [(set (match_operand:HI 0 "general_operand" "=a")
- (mult:HI (sign_extend:HI (match_operand:QI 1 "nonimmediate_operand" "%0"))
+ [(set (match_operand:HI 0 "register_operand" "=a")
+ (mult:HI (sign_extend:HI (match_operand:QI 1 "register_operand" "%0"))
(sign_extend:HI (match_operand:QI 2 "nonimmediate_operand" "qm"))))]
""
- "imul%B0 %2")
+ "imul%B0 %2"
+ [(set_attr "type" "imul")])
(define_insn "umulsidi3"
[(set (match_operand:DI 0 "register_operand" "=A")
(mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%0"))
(zero_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))]
"TARGET_WIDE_MULTIPLY"
- "mul%L0 %2")
+ "mul%L0 %2"
+ [(set_attr "type" "imul")])
(define_insn "mulsidi3"
[(set (match_operand:DI 0 "register_operand" "=A")
(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "%0"))
(sign_extend:DI (match_operand:SI 2 "nonimmediate_operand" "rm"))))]
"TARGET_WIDE_MULTIPLY"
- "imul%L0 %2")
+ "imul%L0 %2"
+ [(set_attr "type" "imul")])
(define_insn "umulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=d")
@@ -2830,7 +3367,8 @@
(const_int 32))))
(clobber (match_scratch:SI 3 "=a"))]
"TARGET_WIDE_MULTIPLY"
- "mul%L0 %2")
+ "mul%L0 %2"
+ [(set_attr "type" "imul")])
(define_insn "smulsi3_highpart"
[(set (match_operand:SI 0 "register_operand" "=d")
@@ -2839,27 +3377,28 @@
(const_int 32))))
(clobber (match_scratch:SI 3 "=a"))]
"TARGET_WIDE_MULTIPLY"
- "imul%L0 %2")
+ "imul%L0 %2"
+ [(set_attr "type" "imul")])
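
umulsidi3 and umulsi3_highpart above both lower to the one-operand mull, which leaves the full 64-bit product in edx:eax; the highpart pattern simply keeps edx and declares eax a scratch. A C sketch of the relationship, with illustrative names:

    #include <stdint.h>

    /* mull %src leaves the full 32x32 -> 64-bit product in edx:eax.  */
    uint64_t
    umulsidi (uint32_t a, uint32_t b)
    {
      return (uint64_t) a * b;                       /* whole edx:eax */
    }

    uint32_t
    umulsi_highpart (uint32_t a, uint32_t b)
    {
      return (uint32_t) (((uint64_t) a * b) >> 32);  /* edx only */
    }
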
;; The patterns that match these are at the end of this file.
(define_expand "mulxf3"
[(set (match_operand:XF 0 "register_operand" "")
- (mult:XF (match_operand:XF 1 "nonimmediate_operand" "")
- (match_operand:XF 2 "nonimmediate_operand" "")))]
+ (mult:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
"TARGET_80387"
"")
(define_expand "muldf3"
[(set (match_operand:DF 0 "register_operand" "")
- (mult:DF (match_operand:DF 1 "nonimmediate_operand" "")
+ (mult:DF (match_operand:DF 1 "register_operand" "")
(match_operand:DF 2 "nonimmediate_operand" "")))]
"TARGET_80387"
"")
(define_expand "mulsf3"
[(set (match_operand:SF 0 "register_operand" "")
- (mult:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (mult:SF (match_operand:SF 1 "register_operand" "")
(match_operand:SF 2 "nonimmediate_operand" "")))]
"TARGET_80387"
"")
@@ -2867,38 +3406,39 @@
;;- divide instructions
(define_insn "divqi3"
- [(set (match_operand:QI 0 "general_operand" "=a")
- (div:QI (match_operand:HI 1 "general_operand" "0")
- (match_operand:QI 2 "general_operand" "qm")))]
+ [(set (match_operand:QI 0 "register_operand" "=a")
+ (div:QI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonimmediate_operand" "qm")))]
""
"idiv%B0 %2")
(define_insn "udivqi3"
- [(set (match_operand:QI 0 "general_operand" "=a")
- (udiv:QI (match_operand:HI 1 "general_operand" "0")
- (match_operand:QI 2 "general_operand" "qm")))]
+ [(set (match_operand:QI 0 "register_operand" "=a")
+ (udiv:QI (match_operand:HI 1 "register_operand" "0")
+ (match_operand:QI 2 "nonimmediate_operand" "qm")))]
""
- "div%B0 %2")
+ "div%B0 %2"
+ [(set_attr "type" "idiv")])
;; The patterns that match these are at the end of this file.
(define_expand "divxf3"
[(set (match_operand:XF 0 "register_operand" "")
- (div:XF (match_operand:XF 1 "nonimmediate_operand" "")
- (match_operand:XF 2 "nonimmediate_operand" "")))]
+ (div:XF (match_operand:XF 1 "register_operand" "")
+ (match_operand:XF 2 "register_operand" "")))]
"TARGET_80387"
"")
(define_expand "divdf3"
[(set (match_operand:DF 0 "register_operand" "")
- (div:DF (match_operand:DF 1 "nonimmediate_operand" "")
- (match_operand:DF 2 "nonimmediate_operand" "")))]
- "TARGET_80387"
- "")
-
+ (div:DF (match_operand:DF 1 "register_operand" "")
+ (match_operand:DF 2 "nonimmediate_operand" "")))]
+ "TARGET_80387"
+ "")
+
(define_expand "divsf3"
[(set (match_operand:SF 0 "register_operand" "")
- (div:SF (match_operand:SF 1 "nonimmediate_operand" "")
+ (div:SF (match_operand:SF 1 "register_operand" "")
(match_operand:SF 2 "nonimmediate_operand" "")))]
"TARGET_80387"
"")
@@ -2908,7 +3448,7 @@
(define_insn "divmodsi4"
[(set (match_operand:SI 0 "register_operand" "=a")
(div:SI (match_operand:SI 1 "register_operand" "0")
- (match_operand:SI 2 "general_operand" "rm")))
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
(set (match_operand:SI 3 "register_operand" "=&d")
(mod:SI (match_dup 1) (match_dup 2)))]
""
@@ -2920,22 +3460,24 @@
output_asm_insn (\"cltd\", operands);
#endif
return AS1 (idiv%L0,%2);
-}")
+}"
+ [(set_attr "type" "idiv")])
(define_insn "divmodhi4"
[(set (match_operand:HI 0 "register_operand" "=a")
(div:HI (match_operand:HI 1 "register_operand" "0")
- (match_operand:HI 2 "general_operand" "rm")))
+ (match_operand:HI 2 "nonimmediate_operand" "rm")))
(set (match_operand:HI 3 "register_operand" "=&d")
(mod:HI (match_dup 1) (match_dup 2)))]
""
- "cwtd\;idiv%W0 %2")
+ "cwtd\;idiv%W0 %2"
+ [(set_attr "type" "idiv")])
;; ??? Can we make gcc zero extend operand[0]?
(define_insn "udivmodsi4"
[(set (match_operand:SI 0 "register_operand" "=a")
(udiv:SI (match_operand:SI 1 "register_operand" "0")
- (match_operand:SI 2 "general_operand" "rm")))
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
(set (match_operand:SI 3 "register_operand" "=&d")
(umod:SI (match_dup 1) (match_dup 2)))]
""
@@ -2943,13 +3485,14 @@
{
output_asm_insn (AS2 (xor%L3,%3,%3), operands);
return AS1 (div%L0,%2);
-}")
+}"
+ [(set_attr "type" "idiv")])
;; ??? Can we make gcc zero extend operand[0]?
(define_insn "udivmodhi4"
[(set (match_operand:HI 0 "register_operand" "=a")
(udiv:HI (match_operand:HI 1 "register_operand" "0")
- (match_operand:HI 2 "general_operand" "rm")))
+ (match_operand:HI 2 "nonimmediate_operand" "rm")))
(set (match_operand:HI 3 "register_operand" "=&d")
(umod:HI (match_dup 1) (match_dup 2)))]
""
@@ -2957,7 +3500,8 @@
{
output_asm_insn (AS2 (xor%W0,%3,%3), operands);
return AS1 (div%W0,%2);
-}")
+}"
+ [(set_attr "type" "idiv")])
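
divmodsi4 and udivmodsi4 above hand a 64-bit dividend in edx:eax to a single idivl/divl and get quotient and remainder together; cltd sign-extends eax into edx for the signed case, while the unsigned pattern clears edx with xorl. A C sketch of the pairing, with illustrative names:

    #include <stdint.h>

    struct divmod { int32_t quot; int32_t rem; };

    /* One idivl produces both results: cltd sign-extends eax into edx
       (effectively edx = eax >> 31), then idivl leaves the quotient in
       eax and the remainder in edx.  */
    struct divmod
    divmodsi (int32_t n, int32_t d)
    {
      struct divmod r;
      r.quot = n / d;   /* eax after idivl */
      r.rem  = n % d;   /* edx after idivl */
      return r;
    }
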
/*
;;this should be a valid double division which we may want to add
@@ -2965,11 +3509,12 @@
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=a")
(udiv:DI (match_operand:DI 1 "register_operand" "a")
- (match_operand:SI 2 "general_operand" "rm")))
+ (match_operand:SI 2 "nonimmediate_operand" "rm")))
(set (match_operand:SI 3 "register_operand" "=d")
(umod:SI (match_dup 1) (match_dup 2)))]
""
- "div%L0 %2,%0")
+ "div%L0 %2,%0"
+ [(set_attr "type" "idiv")])
*/
;;- and instructions
@@ -2989,21 +3534,33 @@
;; The `r' in `rm' for operand 3 looks redundant, but it causes
;; optional reloads to be generated if op 3 is a pseudo in a stack slot.
-;; ??? What if we only change one byte of an offsettable memory reference?
(define_insn "andsi3"
- [(set (match_operand:SI 0 "general_operand" "=r,r,rm,r")
- (and:SI (match_operand:SI 1 "general_operand" "%rm,qm,0,0")
- (match_operand:SI 2 "general_operand" "L,K,ri,rm")))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (and:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
+ (match_operand:SI 2 "general_operand" "ri,rm")))]
""
"*
{
- if (GET_CODE (operands[2]) == CONST_INT
- && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ HOST_WIDE_INT intval;
+ if (!rtx_equal_p (operands[0], operands[1])
+ && rtx_equal_p (operands[0], operands[2]))
{
- if (INTVAL (operands[2]) == 0xffff && REG_P (operands[0])
+ rtx tmp;
+ tmp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = tmp;
+ }
+ switch (GET_CODE (operands[2]))
+ {
+ case CONST_INT:
+ if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ break;
+ intval = INTVAL (operands[2]);
+ /* zero-extend 16->32? */
+ if (intval == 0xffff && REG_P (operands[0])
&& (! REG_P (operands[1])
|| REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
- && (TARGET_386 || ! rtx_equal_p (operands[0], operands[1])))
+ && (!TARGET_ZERO_EXTEND_WITH_AND || ! rtx_equal_p (operands[0], operands[1])))
{
/* ??? tege: Should forget CC_STATUS only if we clobber a
remembered operand. Fix that later. */
@@ -3015,11 +3572,12 @@
#endif
}
- if (INTVAL (operands[2]) == 0xff && REG_P (operands[0])
+ /* zero extend 8->32? */
+ if (intval == 0xff && REG_P (operands[0])
&& !(REG_P (operands[1]) && NON_QI_REG_P (operands[1]))
&& (! REG_P (operands[1])
|| REGNO (operands[0]) != 0 || REGNO (operands[1]) != 0)
- && (TARGET_386 || ! rtx_equal_p (operands[0], operands[1])))
+ && (!TARGET_ZERO_EXTEND_WITH_AND || ! rtx_equal_p (operands[0], operands[1])))
{
/* ??? tege: Should forget CC_STATUS only if we clobber a
remembered operand. Fix that later. */
@@ -3031,47 +3589,110 @@
#endif
}
- if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff) == 0)
- {
- CC_STATUS_INIT;
+	  /* Check partial bytes; non-QI regs are not available.  */
+ if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
+ break;
- if (INTVAL (operands[2]) == 0xffffff00)
+ /* only low byte has zero bits? */
+ if (~(intval | 0xff) == 0)
+ {
+ intval &= 0xff;
+ if (REG_P (operands[0]))
{
- operands[2] = const0_rtx;
- return AS2 (mov%B0,%2,%b0);
+ if (intval == 0)
+ {
+ CC_STATUS_INIT;
+ return AS2 (xor%B0,%b0,%b0);
+ }
+
+	      /* We're better off with the 32-bit version if reg != EAX, */
+	      /* since the mask then fits a sign-extended 8-bit immediate. */
+ if (REGNO (operands[0]) != 0 && (intval & 0x80))
+ break;
}
- operands[2] = GEN_INT (INTVAL (operands[2]) & 0xff);
+ CC_STATUS_INIT;
+
+ operands[2] = GEN_INT (intval);
+
+ if (intval == 0)
+ return AS2 (mov%B0,%2,%b0);
+
return AS2 (and%B0,%2,%b0);
}
- if (QI_REG_P (operands[0]) && ~(INTVAL (operands[2]) | 0xff00) == 0)
+ /* only second byte has zero? */
+ if (~(intval | 0xff00) == 0)
{
CC_STATUS_INIT;
- if (INTVAL (operands[2]) == 0xffff00ff)
+ intval = (intval >> 8) & 0xff;
+ operands[2] = GEN_INT (intval);
+ if (intval == 0)
{
- operands[2] = const0_rtx;
- return AS2 (mov%B0,%2,%h0);
+ if (REG_P (operands[0]))
+ return AS2 (xor%B0,%h0,%h0);
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (mov%B0,%2,%b0);
}
- operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
- return AS2 (and%B0,%2,%h0);
+ if (REG_P (operands[0]))
+ return AS2 (and%B0,%2,%h0);
+
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ if (REG_P (operands[0]))
+ break;
+
+ /* third byte has zero bits? */
+ if (~(intval | 0xff0000) == 0)
+ {
+ intval = (intval >> 16) & 0xff;
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+byte_and_operation:
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (intval);
+ if (intval == 0)
+ return AS2 (mov%B0,%2,%b0);
+ return AS2 (and%B0,%2,%b0);
+ }
+
+ /* fourth byte has zero bits? */
+ if (~(intval | 0xff000000) == 0)
+ {
+ intval = (intval >> 24) & 0xff;
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ goto byte_and_operation;
}
- if (GET_CODE (operands[0]) == MEM && INTVAL (operands[2]) == 0xffff0000)
+ /* Low word is zero? */
+ if (intval == 0xffff0000)
{
+word_zero_and_operation:
+ CC_STATUS_INIT;
operands[2] = const0_rtx;
return AS2 (mov%W0,%2,%w0);
}
+
+ /* High word is zero? */
+ if (intval == 0x0000ffff)
+ {
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ goto word_zero_and_operation;
+ }
+
+ default:
+ break;
}
return AS2 (and%L0,%2,%0);
}")
(define_insn "andhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm,r")
- (and:HI (match_operand:HI 1 "general_operand" "%0,0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (and:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
(match_operand:HI 2 "general_operand" "ri,rm")))]
""
"*
@@ -3110,14 +3731,46 @@
operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
return AS2 (and%B0,%2,%h0);
}
+
+      /* Use 32-bit ops on registers when there are no sign issues.  */
+ if (REG_P (operands[0]))
+ {
+ if (!(INTVAL (operands[2]) & ~0x7fff))
+ return AS2 (and%L0,%2,%k0);
+ }
+ }
+
+ if (REG_P (operands[0])
+ && i386_aligned_p (operands[2]))
+ {
+ CC_STATUS_INIT;
+      /* If op[2] is constant, we should zero-extend it and make a note
+	 that op[0] has been zero-extended, so that we could use 32-bit
+	 ops on it from then on, but there is no such reg-note available.
+	 Instead we do a sign extension, as that can result in shorter
+	 asm.  */
+ operands[2] = i386_sext16_if_const (operands[2]);
+ return AS2 (and%L0,%k2,%k0);
+ }
+
+ /* Use a 32-bit word with the upper bits set, invalidate CC */
+ if (GET_CODE (operands[2]) == CONST_INT
+ && i386_aligned_p (operands[0]))
+ {
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+ CC_STATUS_INIT;
+ val |= ~0xffff;
+ if (val != INTVAL (operands[2]))
+ operands[2] = GEN_INT (val);
+ return AS2 (and%L0,%k2,%k0);
}
return AS2 (and%W0,%2,%0);
}")
(define_insn "andqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm,q")
- (and:QI (match_operand:QI 1 "general_operand" "%0,0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (and:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
(match_operand:QI 2 "general_operand" "qn,qmn")))]
""
"* return AS2 (and%B0,%2,%0);")
@@ -3136,10 +3789,10 @@
"and%W0 %1,%0")
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=q")
+ [(set (match_operand:SI 0 "register_operand" "=q")
(and:SI
(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "qm"))
- (match_operand:SI 2 "general_operand" "0")))]
+ (match_operand:SI 2 "register_operand" "0")))]
"GET_CODE (operands[2]) == CONST_INT
&& (unsigned int) INTVAL (operands[2]) < (1 << GET_MODE_BITSIZE (QImode))"
"and%L0 %1,%0")
@@ -3148,134 +3801,278 @@
;;- Bit set (inclusive or) instructions
-;; ??? What if we only change one byte of an offsettable memory reference?
+;; This optimizes known byte-wide operations to memory, and in some cases
+;; to QI registers.  Note that we don't want to use the QI registers too
+;; aggressively, because often the 32-bit register instruction is the same
+;; size, and likely to be faster on PentiumPro.
(define_insn "iorsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm,r")
- (ior:SI (match_operand:SI 1 "general_operand" "%0,0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (ior:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
(match_operand:SI 2 "general_operand" "ri,rm")))]
""
"*
{
- if (GET_CODE (operands[2]) == CONST_INT
- && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ HOST_WIDE_INT intval;
+ switch (GET_CODE (operands[2]))
{
- if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
- && (INTVAL (operands[2]) & ~0xff) == 0)
- {
- CC_STATUS_INIT;
+ case CONST_INT:
- if (INTVAL (operands[2]) == 0xff)
- return AS2 (mov%B0,%2,%b0);
+ if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
+ break;
- return AS2 (or%B0,%2,%b0);
+ /* don't try to optimize volatile accesses */
+ if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ break;
+
+ intval = INTVAL (operands[2]);
+ if ((intval & ~0xff) == 0)
+ {
+ if (REG_P (operands[0]))
+ {
+ /* Do low byte access only for %eax or when high bit is set */
+ if (REGNO (operands[0]) != 0 && !(intval & 0x80))
+ break;
+ }
+
+byte_or_operation:
+ CC_STATUS_INIT;
+
+ if (intval != INTVAL (operands[2]))
+ operands[2] = GEN_INT (intval);
+
+ if (intval == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
}
- if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ /* second byte? */
+ if ((intval & ~0xff00) == 0)
{
- CC_STATUS_INIT;
- operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+ intval >>= 8;
- if (INTVAL (operands[2]) == 0xff)
- return AS2 (mov%B0,%2,%h0);
+ if (REG_P (operands[0]))
+ {
+ CC_STATUS_INIT;
+ operands[2] = GEN_INT (intval);
+ if (intval == 0xff)
+ return AS2 (mov%B0,%2,%h0);
- return AS2 (or%B0,%2,%h0);
+ return AS2 (or%B0,%2,%h0);
+ }
+
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+ goto byte_or_operation;
+ }
+
+ if (REG_P (operands[0]))
+ break;
+
+ /* third byte? */
+ if ((intval & ~0xff0000) == 0)
+ {
+ intval >>= 16;
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ goto byte_or_operation;
+ }
+
+ /* fourth byte? */
+ if ((intval & ~0xff000000) == 0)
+ {
+ intval = (intval >> 24) & 0xff;
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ goto byte_or_operation;
}
+
+ default:
+ break;
}
return AS2 (or%L0,%2,%0);
}")
(define_insn "iorhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm,r")
- (ior:HI (match_operand:HI 1 "general_operand" "%0,0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (ior:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
(match_operand:HI 2 "general_operand" "ri,rm")))]
""
"*
{
- if (GET_CODE (operands[2]) == CONST_INT
- && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ HOST_WIDE_INT intval;
+ switch (GET_CODE (operands[2]))
{
- /* Can we ignore the upper byte? */
- if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
- && (INTVAL (operands[2]) & 0xff00) == 0)
- {
- CC_STATUS_INIT;
- if (INTVAL (operands[2]) & 0xffff0000)
- operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffff);
+ case CONST_INT:
- if (INTVAL (operands[2]) == 0xff)
- return AS2 (mov%B0,%2,%b0);
+ if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
+ break;
+
+ /* don't try to optimize volatile accesses */
+ if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ break;
+
+ intval = 0xffff & INTVAL (operands[2]);
- return AS2 (or%B0,%2,%b0);
+ if ((intval & 0xff00) == 0)
+ {
+ if (REG_P (operands[0]))
+ {
+ /* Do low byte access only for %eax or when high bit is set */
+ if (REGNO (operands[0]) != 0 && !(intval & 0x80))
+ break;
+ }
+
+byte_or_operation:
+ CC_STATUS_INIT;
+
+ if (intval == 0xff)
+ return AS2 (mov%B0,%2,%b0);
+
+ return AS2 (or%B0,%2,%b0);
}
- /* Can we ignore the lower byte? */
- /* ??? what about offsettable memory references? */
- if (QI_REG_P (operands[0])
- && (INTVAL (operands[2]) & 0xff) == 0)
+ /* high byte? */
+ if ((intval & 0xff) == 0)
{
- CC_STATUS_INIT;
- operands[2] = GEN_INT ((INTVAL (operands[2]) >> 8) & 0xff);
+ intval >>= 8;
+ operands[2] = GEN_INT (intval);
- if (INTVAL (operands[2]) == 0xff)
- return AS2 (mov%B0,%2,%h0);
+ if (REG_P (operands[0]))
+ {
+ CC_STATUS_INIT;
+ if (intval == 0xff)
+ return AS2 (mov%B0,%2,%h0);
+
+ return AS2 (or%B0,%2,%h0);
+ }
- return AS2 (or%B0,%2,%h0);
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+
+ goto byte_or_operation;
}
+
+ default:
+ break;
+ }
+
+ if (REG_P (operands[0])
+ && i386_aligned_p (operands[2]))
+ {
+ CC_STATUS_INIT;
+ operands[2] = i386_sext16_if_const (operands[2]);
+ return AS2 (or%L0,%k2,%k0);
+ }
+
+ if (GET_CODE (operands[2]) == CONST_INT
+ && i386_aligned_p (operands[0]))
+ {
+ CC_STATUS_INIT;
+ intval = 0xffff & INTVAL (operands[2]);
+ if (intval != INTVAL (operands[2]))
+ operands[2] = GEN_INT (intval);
+ return AS2 (or%L0,%2,%k0);
}
return AS2 (or%W0,%2,%0);
}")
(define_insn "iorqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm,q")
- (ior:QI (match_operand:QI 1 "general_operand" "%0,0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (ior:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
(match_operand:QI 2 "general_operand" "qn,qmn")))]
""
"* return AS2 (or%B0,%2,%0);")
;;- xor instructions
-;; ??? What if we only change one byte of an offsettable memory reference?
(define_insn "xorsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm,r")
- (xor:SI (match_operand:SI 1 "general_operand" "%0,0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm,r")
+ (xor:SI (match_operand:SI 1 "nonimmediate_operand" "%0,0")
(match_operand:SI 2 "general_operand" "ri,rm")))]
""
"*
{
- if (GET_CODE (operands[2]) == CONST_INT
- && ! (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0])))
+ HOST_WIDE_INT intval;
+ switch (GET_CODE (operands[2]))
{
- if ((! REG_P (operands[0]) || QI_REG_P (operands[0]))
- && (INTVAL (operands[2]) & ~0xff) == 0)
- {
- CC_STATUS_INIT;
+ case CONST_INT:
- if (INTVAL (operands[2]) == 0xff)
- return AS1 (not%B0,%b0);
+ if (REG_P (operands[0]) && ! QI_REG_P (operands[0]))
+ break;
- return AS2 (xor%B0,%2,%b0);
+ /* don't try to optimize volatile accesses */
+ if (GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))
+ break;
+
+ intval = INTVAL (operands[2]);
+ if ((intval & ~0xff) == 0)
+ {
+ if (REG_P (operands[0]))
+ {
+ /* Do low byte access only for %eax or when high bit is set */
+ if (REGNO (operands[0]) != 0 && !(intval & 0x80))
+ break;
+ }
+
+byte_xor_operation:
+ CC_STATUS_INIT;
+
+ if (intval == 0xff)
+ return AS1 (not%B0,%b0);
+
+ if (intval != INTVAL (operands[2]))
+ operands[2] = GEN_INT (intval);
+ return AS2 (xor%B0,%2,%b0);
}
- if (QI_REG_P (operands[0]) && (INTVAL (operands[2]) & ~0xff00) == 0)
+ /* second byte? */
+ if ((intval & ~0xff00) == 0)
{
- CC_STATUS_INIT;
- operands[2] = GEN_INT (INTVAL (operands[2]) >> 8);
+ intval >>= 8;
- if (INTVAL (operands[2]) == 0xff)
- return AS1 (not%B0,%h0);
+ if (REG_P (operands[0]))
+ {
+ CC_STATUS_INIT;
+ if (intval == 0xff)
+ return AS1 (not%B0,%h0);
- return AS2 (xor%B0,%2,%h0);
+ operands[2] = GEN_INT (intval);
+ return AS2 (xor%B0,%2,%h0);
+ }
+
+ operands[0] = adj_offsettable_operand (operands[0], 1);
+
+ goto byte_xor_operation;
}
+
+ if (REG_P (operands[0]))
+ break;
+
+ /* third byte? */
+ if ((intval & ~0xff0000) == 0)
+ {
+ intval >>= 16;
+ operands[0] = adj_offsettable_operand (operands[0], 2);
+ goto byte_xor_operation;
+ }
+
+ /* fourth byte? */
+ if ((intval & ~0xff000000) == 0)
+ {
+ intval = (intval >> 24) & 0xff;
+ operands[0] = adj_offsettable_operand (operands[0], 3);
+ goto byte_xor_operation;
+ }
+
+ default:
+ break;
}
return AS2 (xor%L0,%2,%0);
}")
(define_insn "xorhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm,r")
- (xor:HI (match_operand:HI 1 "general_operand" "%0,0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm,r")
+ (xor:HI (match_operand:HI 1 "nonimmediate_operand" "%0,0")
(match_operand:HI 2 "general_operand" "ri,rm")))]
""
"*
@@ -3312,16 +4109,136 @@
}
}
+ if (REG_P (operands[0])
+ && i386_aligned_p (operands[2]))
+ {
+ CC_STATUS_INIT;
+ operands[2] = i386_sext16_if_const (operands[2]);
+ return AS2 (xor%L0,%k2,%k0);
+ }
+
+ if (GET_CODE (operands[2]) == CONST_INT
+ && i386_aligned_p (operands[0]))
+ {
+ HOST_WIDE_INT intval;
+ CC_STATUS_INIT;
+ intval = 0xffff & INTVAL (operands[2]);
+ if (intval != INTVAL (operands[2]))
+ operands[2] = GEN_INT (intval);
+ return AS2 (xor%L0,%2,%k0);
+ }
+
return AS2 (xor%W0,%2,%0);
}")
(define_insn "xorqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm,q")
- (xor:QI (match_operand:QI 1 "general_operand" "%0,0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm,q")
+ (xor:QI (match_operand:QI 1 "nonimmediate_operand" "%0,0")
(match_operand:QI 2 "general_operand" "qn,qm")))]
""
"* return AS2 (xor%B0,%2,%0);")
+;; logical operations for DImode
+
+
+(define_insn "anddi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,!r,o,!&r,!o,!o")
+ (and:DI (match_operand:DI 1 "general_operand" "%0,0,0,0iF,or,riF,o")
+ (match_operand:DI 2 "general_operand" "o,riF,0,or,or,oriF,o")))
+ (clobber (match_scratch:SI 3 "=X,X,X,&r,X,&r,&r"))]
+ ""
+ "#")
+
+(define_insn "iordi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,!r,o,!&r,!o,!o")
+ (ior:DI (match_operand:DI 1 "general_operand" "%0,0,0,0iF,or,riF,o")
+ (match_operand:DI 2 "general_operand" "o,riF,0,or,or,oriF,o")))
+ (clobber (match_scratch:SI 3 "=X,X,X,&r,X,&r,&r"))]
+ ""
+ "#")
+
+(define_insn "xordi3"
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,!r,o,!&r,!o,!o")
+ (xor:DI (match_operand:DI 1 "general_operand" "%0,0,0,0iF,or,riF,o")
+ (match_operand:DI 2 "general_operand" "o,riF,0,or,or,oriF,o")))
+ (clobber (match_scratch:SI 3 "=X,X,X,&r,X,&r,&r"))]
+ ""
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "general_operand" "=&r,&ro,!r,o,!&r,!o,!o")
+ (match_operator:DI 4 "ix86_logical_operator"
+ [(match_operand:DI 1 "general_operand" "%0,0,0,0iF,or,riF,o")
+ (match_operand:DI 2 "general_operand" "o,riF,0,or,or,oriF,o")]))
+ (clobber (match_scratch:SI 3 "=X,X,X,&r,X,&r,&r"))]
+ "reload_completed"
+ [(const_int 0)]
+ "
+{
+ rtx low[3], high[3], xops[7], temp;
+ rtx (*genfunc)() = (GET_CODE (operands[4]) == AND ? gen_andsi3
+ : GET_CODE (operands[4]) == IOR ? gen_iorsi3
+ : GET_CODE (operands[4]) == XOR ? gen_xorsi3
+ : 0);
+
+ if (rtx_equal_p (operands[0], operands[2]))
+ {
+ temp = operands[1];
+ operands[1] = operands[2];
+ operands[2] = temp;
+ }
+
+ split_di (operands, 3, low, high);
+ if (!rtx_equal_p (operands[0], operands[1]))
+ {
+ xops[0] = high[0];
+ xops[1] = low[0];
+ xops[2] = high[1];
+ xops[3] = low[1];
+
+ if (GET_CODE (operands[0]) != MEM)
+ {
+ emit_insn (gen_movsi (xops[1], xops[3]));
+ emit_insn (gen_movsi (xops[0], xops[2]));
+ }
+ else
+ {
+ xops[4] = high[2];
+ xops[5] = low[2];
+ xops[6] = operands[3];
+ emit_insn (gen_movsi (xops[6], xops[3]));
+ emit_insn ((*genfunc) (xops[6], xops[6], xops[5]));
+ emit_insn (gen_movsi (xops[1], xops[6]));
+ emit_insn (gen_movsi (xops[6], xops[2]));
+ emit_insn ((*genfunc) (xops[6], xops[6], xops[4]));
+ emit_insn (gen_movsi (xops[0], xops[6]));
+ DONE;
+ }
+ }
+
+ if (GET_CODE (operands[3]) == REG && GET_CODE (operands[2]) != REG)
+ {
+ xops[0] = high[0];
+ xops[1] = low[0];
+ xops[2] = high[2];
+ xops[3] = low[2];
+ xops[4] = operands[3];
+
+ emit_insn (gen_movsi (xops[4], xops[3]));
+ emit_insn ((*genfunc) (xops[1], xops[1], xops[4]));
+ emit_insn (gen_movsi (xops[4], xops[2]));
+ emit_insn ((*genfunc) (xops[0], xops[0], xops[4]));
+ }
+
+ else
+ {
+ emit_insn ((*genfunc) (low[0], low[0], low[2]));
+ emit_insn ((*genfunc) (high[0], high[0], high[2]));
+ }
+
+ DONE;
+}")
+
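
The anddi3/iordi3/xordi3 patterns above only emit "#" and leave the work to the post-reload define_split, which is valid because bitwise operations never carry between the two halves; the split then issues gen_andsi3, gen_iorsi3 or gen_xorsi3 on each word. A minimal C sketch of that property; the helper name is illustrative:

    #include <stdint.h>

    /* Unlike add/sub, a bitwise DImode operation splits into two fully
       independent 32-bit operations on the low and high words.  */
    uint64_t
    and_di_split (uint32_t lo1, uint32_t hi1, uint32_t lo2, uint32_t hi2)
    {
      uint32_t lo = lo1 & lo2;   /* andl on the low words  */
      uint32_t hi = hi1 & hi2;   /* andl on the high words */
      return ((uint64_t) hi << 32) | lo;
    }
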
;;- negation instructions
(define_insn "negdi2"
@@ -3345,50 +4262,50 @@
}")
(define_insn "negsi2"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (neg:SI (match_operand:SI 1 "general_operand" "0")))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (neg:SI (match_operand:SI 1 "nonimmediate_operand" "0")))]
""
"neg%L0 %0")
(define_insn "neghi2"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (neg:HI (match_operand:HI 1 "general_operand" "0")))]
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (neg:HI (match_operand:HI 1 "nonimmediate_operand" "0")))]
""
"neg%W0 %0")
(define_insn "negqi2"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (neg:QI (match_operand:QI 1 "general_operand" "0")))]
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (neg:QI (match_operand:QI 1 "nonimmediate_operand" "0")))]
""
"neg%B0 %0")
(define_insn "negsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
- (neg:SF (match_operand:SF 1 "general_operand" "0")))]
+ (neg:SF (match_operand:SF 1 "register_operand" "0")))]
"TARGET_80387"
"fchs")
(define_insn "negdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
- (neg:DF (match_operand:DF 1 "general_operand" "0")))]
+ (neg:DF (match_operand:DF 1 "register_operand" "0")))]
"TARGET_80387"
"fchs")
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
- (neg:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+ (neg:DF (float_extend:DF (match_operand:SF 1 "register_operand" "0"))))]
"TARGET_80387"
"fchs")
(define_insn "negxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
- (neg:XF (match_operand:XF 1 "general_operand" "0")))]
+ (neg:XF (match_operand:XF 1 "register_operand" "0")))]
"TARGET_80387"
"fchs")
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
- (neg:XF (float_extend:XF (match_operand:DF 1 "general_operand" "0"))))]
+ (neg:XF (float_extend:XF (match_operand:DF 1 "register_operand" "0"))))]
"TARGET_80387"
"fchs")
@@ -3396,44 +4313,48 @@
(define_insn "abssf2"
[(set (match_operand:SF 0 "register_operand" "=f")
- (abs:SF (match_operand:SF 1 "general_operand" "0")))]
+ (abs:SF (match_operand:SF 1 "register_operand" "0")))]
"TARGET_80387"
- "fabs")
+ "fabs"
+ [(set_attr "type" "fpop")])
(define_insn "absdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
- (abs:DF (match_operand:DF 1 "general_operand" "0")))]
+ (abs:DF (match_operand:DF 1 "register_operand" "0")))]
"TARGET_80387"
- "fabs")
+ "fabs"
+ [(set_attr "type" "fpop")])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
- (abs:DF (float_extend:DF (match_operand:SF 1 "general_operand" "0"))))]
+ (abs:DF (float_extend:DF (match_operand:SF 1 "register_operand" "0"))))]
"TARGET_80387"
- "fabs")
+ "fabs"
+ [(set_attr "type" "fpop")])
(define_insn "absxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
- (abs:XF (match_operand:XF 1 "general_operand" "0")))]
+ (abs:XF (match_operand:XF 1 "register_operand" "0")))]
"TARGET_80387"
- "fabs")
+ "fabs"
+ [(set_attr "type" "fpop")])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
- (abs:XF (float_extend:XF (match_operand:DF 1 "general_operand" "0"))))]
+ (abs:XF (float_extend:XF (match_operand:DF 1 "register_operand" "0"))))]
"TARGET_80387"
- "fabs")
+ "fabs"
+ [(set_attr "type" "fpop")])
(define_insn "sqrtsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
- (sqrt:SF (match_operand:SF 1 "general_operand" "0")))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ (sqrt:SF (match_operand:SF 1 "register_operand" "0")))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387"
"fsqrt")
(define_insn "sqrtdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
- (sqrt:DF (match_operand:DF 1 "general_operand" "0")))]
+ (sqrt:DF (match_operand:DF 1 "register_operand" "0")))]
"! TARGET_NO_FANCY_MATH_387 && TARGET_80387
&& (TARGET_IEEE_FP || flag_fast_math) "
"fsqrt")
@@ -3441,14 +4362,13 @@
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(sqrt:DF (float_extend:DF
- (match_operand:SF 1 "general_operand" "0"))))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387"
"fsqrt")
(define_insn "sqrtxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
- (sqrt:XF (match_operand:XF 1 "general_operand" "0")))]
+ (sqrt:XF (match_operand:XF 1 "register_operand" "0")))]
"! TARGET_NO_FANCY_MATH_387 && TARGET_80387
&& (TARGET_IEEE_FP || flag_fast_math) "
"fsqrt")
@@ -3456,94 +4376,84 @@
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
(sqrt:XF (float_extend:XF
- (match_operand:DF 1 "general_operand" "0"))))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ (match_operand:DF 1 "register_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387"
"fsqrt")
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
(sqrt:XF (float_extend:XF
- (match_operand:SF 1 "general_operand" "0"))))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ (match_operand:SF 1 "register_operand" "0"))))]
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387"
"fsqrt")
(define_insn "sindf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(unspec:DF [(match_operand:DF 1 "register_operand" "0")] 1))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fsin")
(define_insn "sinsf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(unspec:SF [(match_operand:SF 1 "register_operand" "0")] 1))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fsin")
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(unspec:DF [(float_extend:DF
(match_operand:SF 1 "register_operand" "0"))] 1))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fsin")
(define_insn "sinxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
(unspec:XF [(match_operand:XF 1 "register_operand" "0")] 1))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fsin")
(define_insn "cosdf2"
[(set (match_operand:DF 0 "register_operand" "=f")
(unspec:DF [(match_operand:DF 1 "register_operand" "0")] 2))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fcos")
(define_insn "cossf2"
[(set (match_operand:SF 0 "register_operand" "=f")
(unspec:SF [(match_operand:SF 1 "register_operand" "0")] 2))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fcos")
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(unspec:DF [(float_extend:DF
(match_operand:SF 1 "register_operand" "0"))] 2))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fcos")
(define_insn "cosxf2"
[(set (match_operand:XF 0 "register_operand" "=f")
(unspec:XF [(match_operand:XF 1 "register_operand" "0")] 2))]
- "! TARGET_NO_FANCY_MATH_387 && TARGET_80387
- && (TARGET_IEEE_FP || flag_fast_math) "
+ "! TARGET_NO_FANCY_MATH_387 && TARGET_80387 && flag_fast_math"
"fcos")
;;- one complement instructions
(define_insn "one_cmplsi2"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (not:SI (match_operand:SI 1 "general_operand" "0")))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (not:SI (match_operand:SI 1 "nonimmediate_operand" "0")))]
""
"not%L0 %0")
(define_insn "one_cmplhi2"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (not:HI (match_operand:HI 1 "general_operand" "0")))]
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (not:HI (match_operand:HI 1 "nonimmediate_operand" "0")))]
""
"not%W0 %0")
(define_insn "one_cmplqi2"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (not:QI (match_operand:QI 1 "general_operand" "0")))]
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (not:QI (match_operand:QI 1 "nonimmediate_operand" "0")))]
""
"not%B0 %0")
@@ -3565,8 +4475,7 @@
;; separately, making all shifts emit pairs of shift double and normal
;; shift. Since sh[lr]d does not shift more than 31 bits, and we wish to
;; support a 63 bit shift, each shift where the count is in a reg expands
-;; to three pairs. If the overall shift is by N bits, then the first two
-;; pairs shift by N / 2 and the last pair by N & 1.
+;; to a pair of shifts, a branch, a shift by 32 and a label.
;; If the shift count is a constant, we need never emit more than one
;; shift pair, instead using moves and sign extension for counts greater
@@ -3596,7 +4505,7 @@
[(set (match_operand:DI 0 "register_operand" "=&r")
(ashift:DI (match_operand:DI 1 "register_operand" "0")
(match_operand:QI 2 "const_int_operand" "J")))]
- ""
+ "CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J')"
"*
{
rtx xops[4], low[1], high[1];
@@ -3631,35 +4540,29 @@
(define_insn "ashldi3_non_const_int"
[(set (match_operand:DI 0 "register_operand" "=&r")
(ashift:DI (match_operand:DI 1 "register_operand" "0")
- (match_operand:QI 2 "register_operand" "c")))
- (clobber (match_dup 2))]
+ (match_operand:QI 2 "register_operand" "c")))]
""
"*
{
- rtx xops[4], low[1], high[1];
+ rtx xops[5], low[1], high[1];
CC_STATUS_INIT;
split_di (operands, 1, low, high);
xops[0] = operands[2];
- xops[1] = const1_rtx;
+ xops[1] = GEN_INT (32);
xops[2] = low[0];
xops[3] = high[0];
-
- output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+ xops[4] = gen_label_rtx ();
output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
output_asm_insn (AS2 (sal%L2,%0,%2), xops);
- output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
- output_asm_insn (AS2 (sal%L2,%0,%2), xops);
-
- xops[1] = GEN_INT (7); /* shift count & 1 */
-
- output_asm_insn (AS2 (shr%B0,%1,%0), xops);
-
- output_asm_insn (AS3_SHIFT_DOUBLE (shld%L3,%0,%2,%3), xops);
- output_asm_insn (AS2 (sal%L2,%0,%2), xops);
-
+ output_asm_insn (AS2 (test%B0,%1,%b0), xops);
+ output_asm_insn (AS1 (je,%X4), xops);
+ output_asm_insn (AS2 (mov%L3,%2,%3), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (xops[4]));
RET;
}")
@@ -3668,15 +4571,15 @@
;; is smaller - use leal for now unless the shift count is 1.
(define_insn "ashlsi3"
- [(set (match_operand:SI 0 "general_operand" "=r,rm")
- (ashift:SI (match_operand:SI 1 "general_operand" "r,0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,rm")
+ (ashift:SI (match_operand:SI 1 "nonimmediate_operand" "r,0")
(match_operand:SI 2 "nonmemory_operand" "M,cI")))]
""
"*
{
if (REG_P (operands[0]) && REGNO (operands[0]) != REGNO (operands[1]))
{
- if (!TARGET_386 && INTVAL (operands[2]) == 1)
+ if (TARGET_DOUBLE_WITH_ADD && INTVAL (operands[2]) == 1)
{
output_asm_insn (AS2 (mov%L0,%1,%0), operands);
return AS2 (add%L0,%1,%0);
@@ -3690,8 +4593,8 @@
output_asm_insn (AS2 (mov%L0,%1,%0), operands);
operands[1] = operands[0];
}
- operands[1] = gen_rtx (MULT, SImode, operands[1],
- GEN_INT (1 << INTVAL (operands[2])));
+ operands[1] = gen_rtx_MULT (SImode, operands[1],
+ GEN_INT (1 << INTVAL (operands[2])));
return AS2 (lea%L0,%a1,%0);
}
}
@@ -3706,8 +4609,8 @@
}")
(define_insn "ashlhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (ashift:HI (match_operand:HI 1 "general_operand" "0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashift:HI (match_operand:HI 1 "nonimmediate_operand" "0")
(match_operand:HI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3722,8 +4625,8 @@
}")
(define_insn "ashlqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (ashift:QI (match_operand:QI 1 "general_operand" "0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashift:QI (match_operand:QI 1 "nonimmediate_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3759,11 +4662,36 @@
DONE;
}")
+(define_insn "ashldi3_32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,m")
+ (ashift:DI (match_operand:DI 1 "nonimmediate_operand" "ro,r")
+ (const_int 32)))]
+ ""
+ "*
+{
+ rtx low[2], high[2], xops[4];
+
+ split_di (operands, 2, low, high);
+ xops[0] = high[0];
+ xops[1] = low[1];
+ xops[2] = low[0];
+ xops[3] = const0_rtx;
+ if (!rtx_equal_p (xops[0], xops[1]))
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
+ if (GET_CODE (low[0]) == MEM)
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ else
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+
+ RET;
+}")
+
(define_insn "ashrdi3_const_int"
[(set (match_operand:DI 0 "register_operand" "=&r")
(ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
(match_operand:QI 2 "const_int_operand" "J")))]
- ""
+ "CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J')"
"*
{
rtx xops[4], low[1], high[1];
@@ -3800,41 +4728,36 @@
(define_insn "ashrdi3_non_const_int"
[(set (match_operand:DI 0 "register_operand" "=&r")
(ashiftrt:DI (match_operand:DI 1 "register_operand" "0")
- (match_operand:QI 2 "register_operand" "c")))
- (clobber (match_dup 2))]
+ (match_operand:QI 2 "register_operand" "c")))]
""
"*
{
- rtx xops[4], low[1], high[1];
+ rtx xops[5], low[1], high[1];
CC_STATUS_INIT;
split_di (operands, 1, low, high);
xops[0] = operands[2];
- xops[1] = const1_rtx;
+ xops[1] = GEN_INT (32);
xops[2] = low[0];
xops[3] = high[0];
-
- output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
+ xops[4] = gen_label_rtx ();
output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
output_asm_insn (AS2 (sar%L3,%0,%3), xops);
- output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
- output_asm_insn (AS2 (sar%L3,%0,%3), xops);
-
- xops[1] = GEN_INT (7); /* shift count & 1 */
-
- output_asm_insn (AS2 (shr%B0,%1,%0), xops);
-
- output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
- output_asm_insn (AS2 (sar%L3,%0,%3), xops);
-
+ output_asm_insn (AS2 (test%B0,%1,%b0), xops);
+ output_asm_insn (AS1 (je,%X4), xops);
+ xops[1] = GEN_INT (31);
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ output_asm_insn (AS2 (sar%L3,%1,%3), xops); /* shift by 32 */
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (xops[4]));
RET;
}")
(define_insn "ashrsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (ashiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
(match_operand:SI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3846,8 +4769,8 @@
}")
(define_insn "ashrhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (ashiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (ashiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
(match_operand:HI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3859,8 +4782,8 @@
}")
(define_insn "ashrqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (ashiftrt:QI (match_operand:QI 1 "general_operand" "0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (ashiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3895,11 +4818,36 @@
DONE;
}")
+(define_insn "lshrdi3_32"
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,m")
+ (lshiftrt:DI (match_operand:DI 1 "nonimmediate_operand" "ro,r")
+ (const_int 32)))]
+ ""
+ "*
+{
+ rtx low[2], high[2], xops[4];
+
+ split_di (operands, 2, low, high);
+ xops[0] = low[0];
+ xops[1] = high[1];
+ xops[2] = high[0];
+ xops[3] = const0_rtx;
+ if (!rtx_equal_p (xops[0], xops[1]))
+ output_asm_insn (AS2 (mov%L0,%1,%0), xops);
+
+ if (GET_CODE (low[0]) == MEM)
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops);
+ else
+ output_asm_insn (AS2 (xor%L2,%2,%2), xops);
+
+ RET;
+}")
+
(define_insn "lshrdi3_const_int"
[(set (match_operand:DI 0 "register_operand" "=&r")
(lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
(match_operand:QI 2 "const_int_operand" "J")))]
- ""
+ "CONST_OK_FOR_LETTER_P (INTVAL (operands[2]), 'J')"
"*
{
rtx xops[4], low[1], high[1];
@@ -3935,41 +4883,35 @@
(define_insn "lshrdi3_non_const_int"
[(set (match_operand:DI 0 "register_operand" "=&r")
(lshiftrt:DI (match_operand:DI 1 "register_operand" "0")
- (match_operand:QI 2 "register_operand" "c")))
- (clobber (match_dup 2))]
+ (match_operand:QI 2 "register_operand" "c")))]
""
"*
{
- rtx xops[4], low[1], high[1];
+ rtx xops[5], low[1], high[1];
CC_STATUS_INIT;
split_di (operands, 1, low, high);
xops[0] = operands[2];
- xops[1] = const1_rtx;
+ xops[1] = GEN_INT (32);
xops[2] = low[0];
xops[3] = high[0];
-
- output_asm_insn (AS2 (ror%B0,%1,%0), xops); /* shift count / 2 */
-
- output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
- output_asm_insn (AS2 (shr%L3,%0,%3), xops);
- output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
- output_asm_insn (AS2 (shr%L3,%0,%3), xops);
-
- xops[1] = GEN_INT (7); /* shift count & 1 */
-
- output_asm_insn (AS2 (shr%B0,%1,%0), xops);
+ xops[4] = gen_label_rtx ();
output_asm_insn (AS3_SHIFT_DOUBLE (shrd%L2,%0,%3,%2), xops);
output_asm_insn (AS2 (shr%L3,%0,%3), xops);
-
+ output_asm_insn (AS2 (test%B0,%1,%b0), xops);
+ output_asm_insn (AS1 (je,%X4), xops);
+ output_asm_insn (AS2 (mov%L2,%3,%2), xops); /* Fast shift by 32 */
+ output_asm_insn (AS2 (xor%L3,%3,%3), xops);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (xops[4]));
RET;
}")
(define_insn "lshrsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (lshiftrt:SI (match_operand:SI 1 "general_operand" "0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:SI (match_operand:SI 1 "nonimmediate_operand" "0")
(match_operand:SI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3981,8 +4923,8 @@
}")
(define_insn "lshrhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (lshiftrt:HI (match_operand:HI 1 "general_operand" "0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (lshiftrt:HI (match_operand:HI 1 "nonimmediate_operand" "0")
(match_operand:HI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -3994,8 +4936,8 @@
}")
(define_insn "lshrqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (lshiftrt:QI (match_operand:QI 1 "general_operand" "0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (lshiftrt:QI (match_operand:QI 1 "nonimmediate_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4009,8 +4951,8 @@
;;- rotate instructions
(define_insn "rotlsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (rotate:SI (match_operand:SI 1 "general_operand" "0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (rotate:SI (match_operand:SI 1 "nonimmediate_operand" "0")
(match_operand:SI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4022,8 +4964,8 @@
}")
(define_insn "rotlhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (rotate:HI (match_operand:HI 1 "general_operand" "0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (rotate:HI (match_operand:HI 1 "nonimmediate_operand" "0")
(match_operand:HI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4035,8 +4977,8 @@
}")
(define_insn "rotlqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (rotate:QI (match_operand:QI 1 "general_operand" "0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (rotate:QI (match_operand:QI 1 "nonimmediate_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4048,8 +4990,8 @@
}")
(define_insn "rotrsi3"
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (rotatert:SI (match_operand:SI 1 "general_operand" "0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (rotatert:SI (match_operand:SI 1 "nonimmediate_operand" "0")
(match_operand:SI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4061,8 +5003,8 @@
}")
(define_insn "rotrhi3"
- [(set (match_operand:HI 0 "general_operand" "=rm")
- (rotatert:HI (match_operand:HI 1 "general_operand" "0")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=rm")
+ (rotatert:HI (match_operand:HI 1 "nonimmediate_operand" "0")
(match_operand:HI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4074,8 +5016,8 @@
}")
(define_insn "rotrqi3"
- [(set (match_operand:QI 0 "general_operand" "=qm")
- (rotatert:QI (match_operand:QI 1 "general_operand" "0")
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=qm")
+ (rotatert:QI (match_operand:QI 1 "nonimmediate_operand" "0")
(match_operand:QI 2 "nonmemory_operand" "cI")))]
""
"*
@@ -4090,11 +5032,13 @@
;; This usually loses.  But try a define_expand to recognize a few cases
;; we can do efficiently, such as accessing the "high" QImode registers,
;; %ah, %bh, %ch, %dh.
+;; ??? Note this has a botch on the mode of operand 0, which needs to be
+;; fixed if this is ever enabled.
(define_insn "insv"
[(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+&r")
- (match_operand:SI 1 "general_operand" "i")
- (match_operand:SI 2 "general_operand" "i"))
- (match_operand:SI 3 "general_operand" "ri"))]
+ (match_operand:SI 1 "immediate_operand" "i")
+ (match_operand:SI 2 "immediate_operand" "i"))
+ (match_operand:SI 3 "nonmemory_operand" "ri"))]
""
"*
{
@@ -4110,7 +5054,7 @@
}
else
{
- operands[0] = gen_rtx (REG, SImode, REGNO (operands[0]));
+ operands[0] = gen_rtx_REG (SImode, REGNO (operands[0]));
if (INTVAL (operands[2]))
output_asm_insn (AS2 (ror%L0,%2,%0), operands);
output_asm_insn (AS3 (shrd%L0,%1,%3,%0), operands);
@@ -4130,7 +5074,7 @@
[(set (zero_extract:SI (match_operand:SI 0 "general_operand" "")
(match_operand:SI 1 "immediate_operand" "")
(match_operand:SI 2 "immediate_operand" ""))
- (match_operand:QI 3 "general_operand" "ri"))]
+ (match_operand:QI 3 "nonmemory_operand" "ri"))]
""
"
{
@@ -4143,22 +5087,6 @@
&& ! INTVAL (operands[1]) == 1)
FAIL;
}")
-
-;; ??? Are these constraints right?
-(define_insn ""
- [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+&qo")
- (const_int 8)
- (const_int 8))
- (match_operand:QI 1 "general_operand" "qn"))]
- ""
- "*
-{
- if (REG_P (operands[0]))
- return AS2 (mov%B0,%1,%h0);
-
- operands[0] = adj_offsettable_operand (operands[0], 1);
- return AS2 (mov%B0,%1,%0);
-}")
*/
;; On i386, the register count for a bit operation is *not* truncated,
@@ -4173,11 +5101,11 @@
;; General bit set and clear.
(define_insn ""
- [(set (zero_extract:SI (match_operand:SI 0 "general_operand" "+rm")
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+rm")
(const_int 1)
- (match_operand:SI 2 "general_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))
(match_operand:SI 3 "const_int_operand" "n"))]
- "TARGET_386 && GET_CODE (operands[2]) != CONST_INT"
+ "TARGET_USE_BIT_TEST && GET_CODE (operands[2]) != CONST_INT"
"*
{
CC_STATUS_INIT;
@@ -4191,11 +5119,11 @@
;; Bit complement. See comments on previous pattern.
;; ??? Is this really worthwhile?
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=rm")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
(xor:SI (ashift:SI (const_int 1)
- (match_operand:SI 1 "general_operand" "r"))
- (match_operand:SI 2 "general_operand" "0")))]
- "TARGET_386 && GET_CODE (operands[1]) != CONST_INT"
+ (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "nonimmediate_operand" "0")))]
+ "TARGET_USE_BIT_TEST && GET_CODE (operands[1]) != CONST_INT"
"*
{
CC_STATUS_INIT;
@@ -4204,11 +5132,11 @@
}")
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=rm")
- (xor:SI (match_operand:SI 1 "general_operand" "0")
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=rm")
+ (xor:SI (match_operand:SI 1 "nonimmediate_operand" "0")
(ashift:SI (const_int 1)
- (match_operand:SI 2 "general_operand" "r"))))]
- "TARGET_386 && GET_CODE (operands[2]) != CONST_INT"
+ (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_USE_BIT_TEST && GET_CODE (operands[2]) != CONST_INT"
"*
{
CC_STATUS_INIT;
@@ -4229,7 +5157,7 @@
(define_insn ""
[(set (cc0) (zero_extract (match_operand:SI 0 "register_operand" "r")
(const_int 1)
- (match_operand:SI 1 "general_operand" "r")))]
+ (match_operand:SI 1 "register_operand" "r")))]
"GET_CODE (operands[1]) != CONST_INT"
"*
{
@@ -4272,7 +5200,7 @@
;; The CPU may access unspecified bytes around the actual target byte.
(define_insn ""
- [(set (cc0) (zero_extract (match_operand:QI 0 "general_operand" "rm")
+ [(set (cc0) (zero_extract (match_operand:QI 0 "memory_operand" "m")
(match_operand:SI 1 "const_int_operand" "n")
(match_operand:SI 2 "const_int_operand" "n")))]
"GET_CODE (operands[0]) != MEM || ! MEM_VOLATILE_P (operands[0])"
@@ -4403,7 +5331,8 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (sete,%0);
OUTPUT_JUMP (\"setg %0\", \"seta %0\", NULL_PTR);
@@ -4435,7 +5364,8 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (sete,%0);
OUTPUT_JUMP (\"setl %0\", \"setb %0\", \"sets %0\");
@@ -4467,7 +5397,8 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (sete,%0);
OUTPUT_JUMP (\"setge %0\", \"setae %0\", \"setns %0\");
@@ -4499,7 +5430,8 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (setb,%0);
OUTPUT_JUMP (\"setle %0\", \"setbe %0\", NULL_PTR);
@@ -4553,6 +5485,14 @@
if (cc_prev_status.flags & CC_Z_IN_NOT_C)
return \"jnc %l0\";
else
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4000);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
+
return \"je %l0\";
}")
@@ -4585,6 +5525,14 @@
if (cc_prev_status.flags & CC_Z_IN_NOT_C)
return \"jc %l0\";
else
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4000);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
+
return \"jne %l0\";
}")
@@ -4607,9 +5555,17 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (je,%l0);
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", NULL_PTR);
}")
@@ -4651,9 +5607,17 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (je,%l0);
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\");
}")
@@ -4695,9 +5659,16 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (je,%l0);
-
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\");
}")
@@ -4739,8 +5710,16 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (jb,%l0);
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", NULL_PTR);
}")
@@ -4778,6 +5757,13 @@
if (cc_prev_status.flags & CC_Z_IN_NOT_C)
return \"jc %l0\";
else
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4000);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
return \"jne %l0\";
}")
@@ -4793,6 +5779,13 @@
if (cc_prev_status.flags & CC_Z_IN_NOT_C)
return \"jnc %l0\";
else
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4000);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
return \"je %l0\";
}")
@@ -4805,9 +5798,16 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (jne,%l0);
-
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
OUTPUT_JUMP (\"jle %l0\", \"jbe %l0\", NULL_PTR);
}")
@@ -4829,8 +5829,16 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (jne,%l0);
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
OUTPUT_JUMP (\"jge %l0\", \"jae %l0\", \"jns %l0\");
}")
@@ -4853,9 +5861,16 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (jne,%l0);
-
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (jne,%l0);
+ }
OUTPUT_JUMP (\"jl %l0\", \"jb %l0\", \"js %l0\");
}")
@@ -4877,9 +5892,17 @@
""
"*
{
- if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387))
+ if (TARGET_IEEE_FP && (cc_prev_status.flags & CC_IN_80387)
+ && ! (cc_prev_status.flags & CC_FCOMI))
return AS1 (jae,%l0);
+ if (cc_prev_status.flags & CC_TEST_AX)
+ {
+ operands[1] = gen_rtx_REG (SImode, 0);
+ operands[2] = GEN_INT (0x4100);
+ output_asm_insn (AS2 (testl,%2,%1), operands);
+ return AS1 (je,%l0);
+ }
OUTPUT_JUMP (\"jg %l0\", \"ja %l0\", NULL_PTR);
}")
@@ -4901,7 +5924,7 @@
"jmp %l0")
(define_insn "indirect_jump"
- [(set (pc) (match_operand:SI 0 "general_operand" "rm"))]
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))]
""
"*
{
@@ -4929,7 +5952,7 @@
(define_insn ""
[(set (pc)
(if_then_else (match_operator 0 "arithmetic_comparison_operator"
- [(plus:SI (match_operand:SI 1 "general_operand" "+r,m")
+ [(plus:SI (match_operand:SI 1 "nonimmediate_operand" "+r,m")
(match_operand:SI 2 "general_operand" "rmi,ri"))
(const_int 0)])
(label_ref (match_operand 3 "" ""))
@@ -4944,7 +5967,7 @@
if (operands[2] == constm1_rtx)
output_asm_insn (AS1 (dec%L1,%1), operands);
- else if (operands[1] == const1_rtx)
+ else if (operands[2] == const1_rtx)
output_asm_insn (AS1 (inc%L1,%1), operands);
else
@@ -4956,7 +5979,7 @@
(define_insn ""
[(set (pc)
(if_then_else (match_operator 0 "arithmetic_comparison_operator"
- [(minus:SI (match_operand:SI 1 "general_operand" "+r,m")
+ [(minus:SI (match_operand:SI 1 "nonimmediate_operand" "+r,m")
(match_operand:SI 2 "general_operand" "rmi,ri"))
(const_int 0)])
(label_ref (match_operand 3 "" ""))
@@ -4980,6 +6003,110 @@
return AS1 (%J0,%l3);
}")
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "general_operand" "+g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ operands[2] = const1_rtx;
+ output_asm_insn (AS2 (sub%L0,%2,%0), operands);
+ return \"jnc %l1\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 0 "general_operand" "+g")
+ (const_int 0))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ operands[2] = const1_rtx;
+ output_asm_insn (AS2 (sub%L0,%2,%0), operands);
+ return \"jc %l1\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ output_asm_insn (AS1 (dec%L0,%0), operands);
+ return \"jnz %l1\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 0 "general_operand" "+g")
+ (const_int 1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int -1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ output_asm_insn (AS1 (dec%L0,%0), operands);
+ return \"jz %l1\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (ne (match_operand:SI 0 "general_operand" "+g")
+ (const_int -1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ output_asm_insn (AS1 (inc%L0,%0), operands);
+ return \"jnz %l1\";
+}")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else (eq (match_operand:SI 0 "general_operand" "+g")
+ (const_int -1))
+ (label_ref (match_operand 1 "" ""))
+ (pc)))
+ (set (match_dup 0)
+ (plus:SI (match_dup 0)
+ (const_int 1)))]
+ ""
+ "*
+{
+ CC_STATUS_INIT;
+ output_asm_insn (AS1 (inc%L0,%0), operands);
+ return \"jz %l1\";
+}")
+
;; Implement switch statements when generating PIC code. Switches are
;; implemented by `tablejump' when not using -fpic.
@@ -4987,10 +6114,12 @@
(define_expand "casesi"
[(set (match_dup 5)
- (minus:SI (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 0 "general_operand" ""))
+ (set (match_dup 6)
+ (minus:SI (match_dup 5)
(match_operand:SI 1 "general_operand" "")))
(set (cc0)
- (compare:CC (match_dup 5)
+ (compare:CC (match_dup 6)
(match_operand:SI 2 "general_operand" "")))
(set (pc)
(if_then_else (gtu (cc0)
@@ -5000,14 +6129,15 @@
(parallel
[(set (pc)
(minus:SI (reg:SI 3)
- (mem:SI (plus:SI (mult:SI (match_dup 5)
+ (mem:SI (plus:SI (mult:SI (match_dup 6)
(const_int 4))
(label_ref (match_operand 3 "" ""))))))
- (clobber (match_scratch:SI 6 ""))])]
+ (clobber (match_scratch:SI 7 ""))])]
"flag_pic"
"
{
operands[5] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
current_function_uses_pic_offset_table = 1;
}")
@@ -5061,12 +6191,12 @@
output_asm_insn (AS2 (mov%L2,%3,%2), xops);
output_asm_insn (\"sub%L2 %l1@GOTOFF(%3,%0,4),%2\", xops);
output_asm_insn (AS1 (jmp,%*%2), xops);
- ASM_OUTPUT_ALIGN_CODE (asm_out_file);
+ ASM_OUTPUT_ALIGN (asm_out_file, i386_align_jumps);
RET;
}")
(define_insn "tablejump"
- [(set (pc) (match_operand:SI 0 "general_operand" "rm"))
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "rm"))
(use (label_ref (match_operand 1 "" "")))]
""
"*
@@ -5312,9 +6442,10 @@
coprocessor registers as containing a possible return value,
simply pretend the untyped call returns a complex long double
value. */
+
emit_call_insn (TARGET_80387
- ? gen_call_value (gen_rtx (REG, XCmode, FIRST_FLOAT_REG),
- operands[0], const0_rtx)
+ ? gen_call_value (gen_rtx_REG (XCmode, FIRST_FLOAT_REG),
+ operands[0], const0_rtx)
: gen_call (operands[0], const0_rtx));
for (i = 0; i < XVECLEN (operands[2], 0); i++)
@@ -5344,19 +6475,148 @@
;; This is only done if the function's epilogue is known to be simple.
;; See comments for simple_386_epilogue in i386.c.
-(define_insn "return"
+(define_expand "return"
[(return)]
- "simple_386_epilogue ()"
+ "ix86_can_use_return_insn_p ()"
+ "")
+
+(define_insn "return_internal"
+ [(return)]
+ "reload_completed"
+ "ret")
+
+(define_insn "return_pop_internal"
+ [(return)
+ (use (match_operand:SI 0 "const_int_operand" ""))]
+ "reload_completed"
+ "ret %0")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "
+{
+ ix86_expand_prologue ();
+ DONE;
+}")
+
+;; The use of UNSPEC here is currently not necessary - a simple SET of ebp
+;; to itself would be enough. But this way we are safe even if some optimizer
+;; becomes too clever in the future.
+(define_insn "prologue_set_stack_ptr"
+ [(set (reg:SI 7)
+ (minus:SI (reg:SI 7) (match_operand:SI 0 "immediate_operand" "i")))
+ (set (reg:SI 6) (unspec:SI [(reg:SI 6)] 4))]
+ ""
"*
{
- function_epilogue (asm_out_file, get_frame_size ());
+ rtx xops [2];
+
+ xops[0] = operands[0];
+ xops[1] = stack_pointer_rtx;
+ output_asm_insn (AS2 (sub%L1,%0,%1), xops);
RET;
}")
-(define_insn "nop"
- [(const_int 0)]
+(define_insn "prologue_set_got"
+ [(set (match_operand:SI 0 "" "")
+ (unspec_volatile
+ [(plus:SI (match_dup 0)
+ (plus:SI (match_operand:SI 1 "symbolic_operand" "")
+ (minus:SI (pc) (match_operand 2 "" ""))))] 1))]
""
- "nop")
+ "*
+{
+ char buffer[64];
+
+ if (TARGET_DEEP_BRANCH_PREDICTION)
+ {
+ sprintf (buffer, \"addl %s,%%0\", XSTR (operands[1], 0));
+ output_asm_insn (buffer, operands);
+ }
+ else
+ {
+ sprintf (buffer, \"addl %s+[.-%%X2],%%0\", XSTR (operands[1], 0));
+ output_asm_insn (buffer, operands);
+ }
+ RET;
+}")
+
+(define_insn "prologue_get_pc"
+ [(set (match_operand:SI 0 "" "")
+ (unspec_volatile [(plus:SI (pc) (match_operand 1 "" ""))] 2))]
+ ""
+ "*
+{
+ char buffer[64];
+
+ output_asm_insn (AS1 (call,%X1), operands);
+ if (! TARGET_DEEP_BRANCH_PREDICTION)
+ {
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\", CODE_LABEL_NUMBER (operands[1]));
+ }
+ RET;
+}")
+
+(define_insn "prologue_get_pc_and_set_got"
+ [(unspec_volatile [(match_operand:SI 0 "" "")] 3)]
+ ""
+ "*
+{
+ operands[1] = gen_label_rtx ();
+ output_asm_insn (AS1 (call,%X1), operands);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\",
+ CODE_LABEL_NUMBER (operands[1]));
+ output_asm_insn (AS1 (pop%L0,%0), operands);
+ output_asm_insn (\"addl $_GLOBAL_OFFSET_TABLE_+[.-%X1],%0\", operands);
+ RET;
+}")
+
+(define_expand "epilogue"
+ [(const_int 1)]
+ ""
+ "
+{
+ ix86_expand_epilogue ();
+ DONE;
+}")
+
+(define_insn "epilogue_set_stack_ptr"
+ [(set (reg:SI 7) (reg:SI 6))
+ (clobber (reg:SI 6))]
+ ""
+ "*
+{
+ rtx xops [2];
+
+ xops[0] = frame_pointer_rtx;
+ xops[1] = stack_pointer_rtx;
+ output_asm_insn (AS2 (mov%L0,%0,%1), xops);
+ RET;
+}")
+
+(define_insn "leave"
+ [(const_int 2)
+ (clobber (reg:SI 6))
+ (clobber (reg:SI 7))]
+ ""
+ "leave")
+
+(define_insn "pop"
+ [(set (match_operand:SI 0 "register_operand" "r")
+ (mem:SI (reg:SI 7)))
+ (set (reg:SI 7) (plus:SI (reg:SI 7) (const_int 4)))]
+ ""
+ "*
+{
+ output_asm_insn (AS1 (pop%L0,%P0), operands);
+ RET;
+}")
(define_expand "movstrsi"
[(parallel [(set (match_operand:BLK 0 "memory_operand" "")
@@ -5380,8 +6640,8 @@
operands[5] = addr0;
operands[6] = addr1;
- operands[0] = gen_rtx (MEM, BLKmode, addr0);
- operands[1] = gen_rtx (MEM, BLKmode, addr1);
+ operands[0] = change_address (operands[0], VOIDmode, addr0);
+ operands[1] = change_address (operands[1], VOIDmode, addr1);
}")
;; It might seem that operands 0 & 1 could use predicate register_operand.
@@ -5426,6 +6686,73 @@
RET;
}")
+(define_expand "clrstrsi"
+ [(set (match_dup 3) (const_int 0))
+ (parallel [(set (match_operand:BLK 0 "memory_operand" "")
+ (const_int 0))
+ (use (match_operand:SI 1 "const_int_operand" ""))
+ (use (match_operand:SI 2 "const_int_operand" ""))
+ (use (match_dup 3))
+ (clobber (match_scratch:SI 4 ""))
+ (clobber (match_dup 5))])]
+ ""
+ "
+{
+ rtx addr0, addr1;
+
+ if (GET_CODE (operands[1]) != CONST_INT)
+ FAIL;
+
+ addr0 = copy_to_mode_reg (Pmode, XEXP (operands[0], 0));
+
+ operands[3] = gen_reg_rtx (SImode);
+ operands[5] = addr0;
+
+ operands[0] = gen_rtx_MEM (BLKmode, addr0);
+}")
+
+;; It might seem that operand 0 could use predicate register_operand.
+;; But strength reduction might offset the MEM expression. So we let
+;; reload put the address into %edi.
+
+(define_insn ""
+ [(set (mem:BLK (match_operand:SI 0 "address_operand" "D"))
+ (const_int 0))
+ (use (match_operand:SI 1 "const_int_operand" "n"))
+ (use (match_operand:SI 2 "immediate_operand" "i"))
+ (use (match_operand:SI 3 "register_operand" "a"))
+ (clobber (match_scratch:SI 4 "=&c"))
+ (clobber (match_dup 0))]
+ ""
+ "*
+{
+ rtx xops[2];
+
+ output_asm_insn (\"cld\", operands);
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ if (INTVAL (operands[1]) & ~0x03)
+ {
+ xops[0] = GEN_INT ((INTVAL (operands[1]) >> 2) & 0x3fffffff);
+ xops[1] = operands[4];
+
+ output_asm_insn (AS2 (mov%L1,%0,%1), xops);
+#ifdef INTEL_SYNTAX
+ output_asm_insn (\"rep stosd\", xops);
+#else
+ output_asm_insn (\"rep\;stosl\", xops);
+#endif
+ }
+ if (INTVAL (operands[1]) & 0x02)
+ output_asm_insn (\"stosw\", operands);
+ if (INTVAL (operands[1]) & 0x01)
+ output_asm_insn (\"stosb\", operands);
+ }
+ else
+ abort ();
+ RET;
+}")
+
(define_expand "cmpstrsi"
[(parallel [(set (match_operand:SI 0 "general_operand" "")
(compare:SI (match_operand:BLK 1 "general_operand" "")
@@ -5447,8 +6774,8 @@
operands[5] = addr1;
operands[6] = addr2;
- operands[1] = gen_rtx (MEM, BLKmode, addr1);
- operands[2] = gen_rtx (MEM, BLKmode, addr2);
+ operands[1] = gen_rtx_MEM (BLKmode, addr1);
+ operands[2] = gen_rtx_MEM (BLKmode, addr2);
}")
@@ -5464,7 +6791,7 @@
;; code to handle zero-length compares.
(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=&r")
+ [(set (match_operand:SI 0 "register_operand" "=&r")
(compare:SI (mem:BLK (match_operand:SI 1 "address_operand" "S"))
(mem:BLK (match_operand:SI 2 "address_operand" "D"))))
(use (match_operand:SI 3 "register_operand" "c"))
@@ -5475,7 +6802,7 @@
""
"*
{
- rtx xops[4], label;
+ rtx xops[2], label;
label = gen_label_rtx ();
@@ -5485,16 +6812,13 @@
output_asm_insn (\"je %l0\", &label);
xops[0] = operands[0];
- xops[1] = gen_rtx (MEM, QImode,
- gen_rtx (PLUS, SImode, operands[1], constm1_rtx));
- xops[2] = gen_rtx (MEM, QImode,
- gen_rtx (PLUS, SImode, operands[2], constm1_rtx));
- xops[3] = operands[3];
-
- output_asm_insn (AS2 (movz%B1%L0,%1,%0), xops);
- output_asm_insn (AS2 (movz%B2%L3,%2,%3), xops);
-
- output_asm_insn (AS2 (sub%L0,%3,%0), xops);
+ xops[1] = const1_rtx;
+ output_asm_insn (AS2 (sbb%L0,%0,%0), xops);
+ if (QI_REG_P (xops[0]))
+ output_asm_insn (AS2 (or%B0,%1,%b0), xops);
+ else
+ output_asm_insn (AS2 (or%L0,%1,%0), xops);
+
ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, \"L\", CODE_LABEL_NUMBER (label));
RET;
}")
@@ -5515,7 +6839,7 @@
cc_status.flags |= CC_NOT_SIGNED;
- xops[0] = gen_rtx (REG, QImode, 0);
+ xops[0] = gen_rtx_REG (QImode, 0);
xops[1] = CONST0_RTX (QImode);
output_asm_insn (\"cld\", operands);
@@ -5523,101 +6847,64 @@
return \"repz\;cmps%B2\";
}")
-(define_expand "ffssi2"
- [(set (match_dup 2)
- (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" ""))
- (const_int -1)))
- (set (match_operand:SI 0 "general_operand" "")
- (plus:SI (match_dup 2) (const_int 1)))]
- ""
- "operands[2] = gen_reg_rtx (SImode);")
-
+
;; Note, you cannot optimize away the branch following the bsfl by assuming
;; that the destination is not modified if the input is 0, since not all
;; x86 implementations do this.
-(define_insn ""
- [(set (match_operand:SI 0 "general_operand" "=&r")
- (plus:SI (ffs:SI (match_operand:SI 1 "general_operand" "rm"))
- (const_int -1)))]
+(define_expand "ffssi2"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (ffs:SI (match_operand:SI 1 "general_operand" "")))]
""
- "*
+ "
{
- rtx xops[3];
- static int ffssi_label_number;
- char buffer[30];
+ rtx label = gen_label_rtx (), temp = gen_reg_rtx (SImode);
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = constm1_rtx;
- output_asm_insn (AS2 (bsf%L0,%1,%0), xops);
-#ifdef LOCAL_LABEL_PREFIX
- sprintf (buffer, \"jnz %sLFFSSI%d\",
- LOCAL_LABEL_PREFIX, ffssi_label_number);
-#else
- sprintf (buffer, \"jnz %sLFFSSI%d\",
- \"\", ffssi_label_number);
-#endif
- output_asm_insn (buffer, xops);
- output_asm_insn (AS2 (mov%L0,%2,%0), xops);
-#ifdef LOCAL_LABEL_PREFIX
- sprintf (buffer, \"%sLFFSSI%d:\",
- LOCAL_LABEL_PREFIX, ffssi_label_number);
-#else
- sprintf (buffer, \"%sLFFSSI%d:\",
- \"\", ffssi_label_number);
-#endif
- output_asm_insn (buffer, xops);
+ emit_insn (gen_ffssi_1 (temp, operands[1]));
+ emit_cmp_insn (operands[1], const0_rtx, NE, NULL_RTX, SImode, 0, 0);
+ emit_jump_insn (gen_bne (label));
+ emit_move_insn (temp, constm1_rtx);
+ emit_label (label);
+ temp = expand_binop (SImode, add_optab, temp, const1_rtx,
+ operands[0], 0, OPTAB_WIDEN);
- ffssi_label_number++;
- return \"\";
+ if (temp != operands[0])
+ emit_move_insn (operands[0], temp);
+ DONE;
}")
-(define_expand "ffshi2"
- [(set (match_dup 2)
- (plus:HI (ffs:HI (match_operand:HI 1 "general_operand" ""))
- (const_int -1)))
- (set (match_operand:HI 0 "general_operand" "")
- (plus:HI (match_dup 2) (const_int 1)))]
+(define_insn "ffssi_1"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:SI 1 "nonimmediate_operand" "rm")] 5))]
""
- "operands[2] = gen_reg_rtx (HImode);")
+ "* return AS2 (bsf%L0,%1,%0);")
-(define_insn ""
- [(set (match_operand:HI 0 "general_operand" "=&r")
- (plus:HI (ffs:HI (match_operand:SI 1 "general_operand" "rm"))
- (const_int -1)))]
+(define_expand "ffshi2"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (ffs:HI (match_operand:HI 1 "general_operand" "")))]
""
- "*
+ "
{
- rtx xops[3];
- static int ffshi_label_number;
- char buffer[30];
+ rtx label = gen_label_rtx (), temp = gen_reg_rtx (HImode);
- xops[0] = operands[0];
- xops[1] = operands[1];
- xops[2] = constm1_rtx;
- output_asm_insn (AS2 (bsf%W0,%1,%0), xops);
-#ifdef LOCAL_LABEL_PREFIX
- sprintf (buffer, \"jnz %sLFFSHI%d\",
- LOCAL_LABEL_PREFIX, ffshi_label_number);
-#else
- sprintf (buffer, \"jnz %sLFFSHI%d\",
- \"\", ffshi_label_number);
-#endif
- output_asm_insn (buffer, xops);
- output_asm_insn (AS2 (mov%W0,%2,%0), xops);
-#ifdef LOCAL_LABEL_PREFIX
- sprintf (buffer, \"%sLFFSHI%d:\",
- LOCAL_LABEL_PREFIX, ffshi_label_number);
-#else
- sprintf (buffer, \"%sLFFSHI%d:\",
- \"\", ffshi_label_number);
-#endif
- output_asm_insn (buffer, xops);
+ emit_insn (gen_ffshi_1 (temp, operands[1]));
+ emit_cmp_insn (operands[1], const0_rtx, NE, NULL_RTX, HImode, 0, 0);
+ emit_jump_insn (gen_bne (label));
+ emit_move_insn (temp, constm1_rtx);
+ emit_label (label);
+ temp = expand_binop (HImode, add_optab, temp, const1_rtx,
+ operands[0], 0, OPTAB_WIDEN);
- ffshi_label_number++;
- return \"\";
+ if (temp != operands[0])
+ emit_move_insn (operands[0], temp);
+ DONE;
}")
+
+(define_insn "ffshi_1"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (unspec:HI [(match_operand:SI 1 "nonimmediate_operand" "rm")] 5))]
+ ""
+ "* return AS2 (bsf%W0,%1,%0);")
;; These patterns match the binary 387 instructions for addM3, subM3,
;; mulM3 and divM3. There are three patterns for each of DFmode and
@@ -5633,81 +6920,171 @@
[(match_operand:DF 1 "nonimmediate_operand" "0,fm")
(match_operand:DF 2 "nonimmediate_operand" "fm,0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(match_operator:DF 3 "binary_387_op"
- [(float:DF (match_operand:SI 1 "general_operand" "rm"))
- (match_operand:DF 2 "general_operand" "0")]))]
+ [(float:DF (match_operand:SI 1 "nonimmediate_operand" "rm"))
+ (match_operand:DF 2 "register_operand" "0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f,f")
(match_operator:XF 3 "binary_387_op"
- [(match_operand:XF 1 "nonimmediate_operand" "0,f")
- (match_operand:XF 2 "nonimmediate_operand" "f,0")]))]
+ [(match_operand:XF 1 "register_operand" "0,f")
+ (match_operand:XF 2 "register_operand" "f,0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
(match_operator:XF 3 "binary_387_op"
- [(float:XF (match_operand:SI 1 "general_operand" "rm"))
- (match_operand:XF 2 "general_operand" "0")]))]
+ [(float:XF (match_operand:SI 1 "nonimmediate_operand" "rm"))
+ (match_operand:XF 2 "register_operand" "0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f,f")
(match_operator:XF 3 "binary_387_op"
- [(float_extend:XF (match_operand:SF 1 "general_operand" "fm,0"))
- (match_operand:XF 2 "general_operand" "0,f")]))]
+ [(float_extend:XF (match_operand:SF 1 "nonimmediate_operand" "fm,0"))
+ (match_operand:XF 2 "register_operand" "0,f")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f")
(match_operator:XF 3 "binary_387_op"
- [(match_operand:XF 1 "general_operand" "0")
- (float:XF (match_operand:SI 2 "general_operand" "rm"))]))]
+ [(match_operand:XF 1 "register_operand" "0")
+ (float:XF (match_operand:SI 2 "nonimmediate_operand" "rm"))]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:XF 0 "register_operand" "=f,f")
(match_operator:XF 3 "binary_387_op"
- [(match_operand:XF 1 "general_operand" "0,f")
+ [(match_operand:XF 1 "register_operand" "0,f")
(float_extend:XF
- (match_operand:SF 2 "general_operand" "fm,0"))]))]
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0"))]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
(match_operator:DF 3 "binary_387_op"
- [(float_extend:DF (match_operand:SF 1 "general_operand" "fm,0"))
- (match_operand:DF 2 "general_operand" "0,f")]))]
+ [(float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "fm,0"))
+ (match_operand:DF 2 "register_operand" "0,f")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f")
(match_operator:DF 3 "binary_387_op"
- [(match_operand:DF 1 "general_operand" "0")
- (float:DF (match_operand:SI 2 "general_operand" "rm"))]))]
+ [(match_operand:DF 1 "register_operand" "0")
+ (float:DF (match_operand:SI 2 "nonimmediate_operand" "rm"))]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:DF 0 "register_operand" "=f,f")
(match_operator:DF 3 "binary_387_op"
- [(match_operand:DF 1 "general_operand" "0,f")
+ [(match_operand:DF 1 "register_operand" "0,f")
(float_extend:DF
- (match_operand:SF 2 "general_operand" "fm,0"))]))]
+ (match_operand:SF 2 "nonimmediate_operand" "fm,0"))]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f,f")
@@ -5715,38 +7092,106 @@
[(match_operand:SF 1 "nonimmediate_operand" "0,fm")
(match_operand:SF 2 "nonimmediate_operand" "fm,0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f")
(match_operator:SF 3 "binary_387_op"
- [(float:SF (match_operand:SI 1 "general_operand" "rm"))
- (match_operand:SF 2 "general_operand" "0")]))]
+ [(float:SF (match_operand:SI 1 "nonimmediate_operand" "rm"))
+ (match_operand:SF 2 "register_operand" "0")]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_insn ""
[(set (match_operand:SF 0 "register_operand" "=f")
(match_operator:SF 3 "binary_387_op"
- [(match_operand:SF 1 "general_operand" "0")
- (float:SF (match_operand:SI 2 "general_operand" "rm"))]))]
+ [(match_operand:SF 1 "register_operand" "0")
+ (float:SF (match_operand:SI 2 "nonimmediate_operand" "rm"))]))]
"TARGET_80387"
- "* return output_387_binary_op (insn, operands);")
+ "* return output_387_binary_op (insn, operands);"
+ [(set (attr "type")
+ (cond [(match_operand:DF 3 "is_mul" "")
+ (const_string "fpmul")
+ (match_operand:DF 3 "is_div" "")
+ (const_string "fpdiv")
+ ]
+ (const_string "fpop")
+ )
+ )])
(define_expand "strlensi"
[(parallel [(set (match_dup 4)
(unspec:SI [(mem:BLK (match_operand:BLK 1 "general_operand" ""))
- (match_operand:QI 2 "register_operand" "")
+ (match_operand:QI 2 "immediate_operand" "")
(match_operand:SI 3 "immediate_operand" "")] 0))
(clobber (match_dup 1))])
(set (match_dup 5)
(not:SI (match_dup 4)))
(set (match_operand:SI 0 "register_operand" "")
- (minus:SI (match_dup 5)
- (const_int 1)))]
+ (plus:SI (match_dup 5)
+ (const_int -1)))]
""
"
{
+ if (TARGET_UNROLL_STRLEN && operands[2] == const0_rtx && optimize > 1)
+ {
+ rtx address;
+ rtx scratch;
+
+ /* Well, it seems that some optimizers do not combine a call like
+ foo(strlen(bar), strlen(bar));
+ when the move and the subtraction are done here.  The length does get
+ calculated just once when these instructions are emitted inside
+ output_strlen_unroll().  But since &bar[strlen(bar)] is often used, and
+ this way one fewer register is live for the lifetime of
+ output_strlen_unroll(), this is better. */
+ scratch = gen_reg_rtx (SImode);
+ address = force_reg (SImode, XEXP (operands[1], 0));
+
+ /* Move the address to the scratch register.  This is done here
+ because the i586 can execute it in the same cycle as the
+ following move. */
+ if (GET_CODE (operands[3]) != CONST_INT || INTVAL (operands[3]) < 4)
+ emit_insn (gen_movsi (scratch, address));
+
+ emit_insn (gen_movsi (operands[0], address));
+
+ if(TARGET_USE_Q_REG)
+ emit_insn (gen_strlensi_unroll5 (operands[0],
+ operands[3],
+ scratch,
+ operands[0]));
+ else
+ emit_insn (gen_strlensi_unroll4 (operands[0],
+ operands[3],
+ scratch,
+ operands[0]));
+
+ /* gen_strlensi_unroll[45] returns the address of the zero
+ at the end of the string, like memchr(), so compute the
+ length by subtracting the start address. */
+ emit_insn (gen_subsi3 (operands[0], operands[0], address));
+ DONE;
+ }
+
operands[1] = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
operands[4] = gen_reg_rtx (SImode);
operands[5] = gen_reg_rtx (SImode);
@@ -5759,7 +7204,7 @@
(define_insn ""
[(set (match_operand:SI 0 "register_operand" "=&c")
(unspec:SI [(mem:BLK (match_operand:SI 1 "address_operand" "D"))
- (match_operand:QI 2 "register_operand" "a")
+ (match_operand:QI 2 "immediate_operand" "a")
(match_operand:SI 3 "immediate_operand" "i")] 0))
(clobber (match_dup 1))]
""
@@ -5773,3 +7218,601 @@
output_asm_insn (AS2 (mov%L0,%1,%0), xops);
return \"repnz\;scas%B2\";
}")
+
+/* Conditional move define_insns. */
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "nonimmediate_operand" "")
+ (match_operand:SI 3 "nonimmediate_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:SI 4 "nonimmediate_operand" "rm,rm,0,0")
+ (match_operand:SI 5 "nonimmediate_operand" "0,0,rm,rm")))]
+ "TARGET_CMOVE"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:SI 4 "nonimmediate_operand" "rm,rm,0,0")
+ (match_operand:SI 5 "nonimmediate_operand" "0,0,rm,rm")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT"
+ "#")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:SI 3 "nonimmediate_operand" "rm,0")
+ (match_operand:SI 4 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:SI 4 "nonimmediate_operand" "rm,0")
+ (match_operand:SI 5 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0) (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SI 2 "nonimmediate_operand" "rm,0")
+ (match_operand:SI 3 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_int_conditional_move (which_alternative, operands);")
+
+(define_expand "movhicc"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (if_then_else:HI (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "nonimmediate_operand" "")
+ (match_operand:HI 3 "nonimmediate_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:HI 4 "nonimmediate_operand" "rm,rm,0,0")
+ (match_operand:HI 5 "nonimmediate_operand" "0,0,rm,rm")))]
+ "TARGET_CMOVE"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r,r,r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:HI 4 "nonimmediate_operand" "rm,rm,0,0")
+ (match_operand:HI 5 "nonimmediate_operand" "0,0,rm,rm")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT"
+ "#")
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:HI 3 "nonimmediate_operand" "rm,0")
+ (match_operand:HI 4 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:HI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:HI 4 "nonimmediate_operand" "rm,0")
+ (match_operand:HI 5 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:HI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:HI 2 "nonimmediate_operand" "rm,0")
+ (match_operand:HI 3 "nonimmediate_operand" "0,rm")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_int_conditional_move (which_alternative, operands);")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ rtx temp;
+
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ /* The floating point conditional move instructions don't directly
+ support conditions resulting from a signed integer comparison. */
+
+ switch (GET_CODE (operands[1]))
+ {
+ case LT:
+ case LE:
+ case GE:
+ case GT:
+ temp = emit_store_flag (gen_reg_rtx (QImode),
+ GET_CODE (operands[1]), i386_compare_op0, i386_compare_op1,
+ VOIDmode, 0, 0);
+
+ if (!temp)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (NE, QImode, temp, const0_rtx);
+ break;
+
+ default:
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+ break;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:SF 4 "register_operand" "f,f,0,0")
+ (match_operand:SF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:SF 4 "register_operand" "f,f,0,0")
+ (match_operand:SF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:SF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:SF 4 "register_operand" "f,0")
+ (match_operand:SF 5 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0) (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:SF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:SF 2 "register_operand" "f,0")
+ (match_operand:SF 3 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_fp_conditional_move (which_alternative, operands);")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ rtx temp;
+
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ /* The floating point conditional move instructions don't directly
+ support conditions resulting from a signed integer comparison. */
+
+ switch (GET_CODE (operands[1]))
+ {
+ case LT:
+ case LE:
+ case GE:
+ case GT:
+ temp = emit_store_flag (gen_reg_rtx (QImode),
+ GET_CODE (operands[1]), i386_compare_op0, i386_compare_op1,
+ VOIDmode, 0, 0);
+
+ if (!temp)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (NE, QImode, temp, const0_rtx);
+ break;
+
+ default:
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+ break;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:DF 4 "register_operand" "f,f,0,0")
+ (match_operand:DF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:DF 4 "register_operand" "f,f,0,0")
+ (match_operand:DF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:DF 3 "register_operand" "f,0")
+ (match_operand:DF 4 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:DF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:DF 4 "register_operand" "f,0")
+ (match_operand:DF 5 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0) (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:DF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:DF 0 "register_operand" "=f,f")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:DF 2 "register_operand" "f,0")
+ (match_operand:DF 3 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_fp_conditional_move (which_alternative, operands);")
+
+(define_expand "movxfcc"
+ [(set (match_operand:XF 0 "register_operand" "")
+ (if_then_else:XF (match_operand 1 "comparison_operator" "")
+ (match_operand:XF 2 "register_operand" "")
+ (match_operand:XF 3 "register_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ rtx temp;
+
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ /* The floating point conditional move instructions don't directly
+ support conditions resulting from a signed integer comparison. */
+
+ switch (GET_CODE (operands[1]))
+ {
+ case LT:
+ case LE:
+ case GE:
+ case GT:
+ temp = emit_store_flag (gen_reg_rtx (QImode),
+ GET_CODE (operands[1]), i386_compare_op0, i386_compare_op1,
+ VOIDmode, 0, 0);
+
+ if (!temp)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (NE, QImode, temp, const0_rtx);
+ break;
+
+ default:
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+ break;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:XF (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:XF 4 "register_operand" "f,f,0,0")
+ (match_operand:XF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f,f,f")
+ (if_then_else:XF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:XF 4 "register_operand" "f,f,0,0")
+ (match_operand:XF 5 "register_operand" "0,0,f,f")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT
+ && GET_CODE (operands[1]) != LT && GET_CODE (operands[1]) != LE
+ && GET_CODE (operands[1]) != GE && GET_CODE (operands[1]) != GT"
+ "#")
+
+(define_split
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (if_then_else:XF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:XF 3 "register_operand" "f,0")
+ (match_operand:XF 4 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:XF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (if_then_else:XF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:XF 4 "register_operand" "f,0")
+ (match_operand:XF 5 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0) (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:XF (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:XF 0 "register_operand" "=f,f")
+ (if_then_else:XF (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:XF 2 "register_operand" "f,0")
+ (match_operand:XF 3 "register_operand" "0,f")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_fp_conditional_move (which_alternative, operands);")
+
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "nonimmediate_operand" "")
+ (match_operand:DI 3 "nonimmediate_operand" "")))]
+ "TARGET_CMOVE"
+ "
+{
+ if (GET_MODE_CLASS (GET_MODE (i386_compare_op0)) != MODE_INT)
+ FAIL;
+
+ operands[1] = gen_rtx_fmt_ee (GET_CODE (operands[1]),
+ GET_MODE (i386_compare_op0),
+ i386_compare_op0, i386_compare_op1);
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r,&r,&r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand:QI 2 "nonimmediate_operand" "q,m,q,m")
+ (match_operand:QI 3 "general_operand" "qmn,qn,qmn,qn")])
+ (match_operand:DI 4 "nonimmediate_operand" "ro,ro,0,0")
+ (match_operand:DI 5 "nonimmediate_operand" "0,0,ro,ro")))]
+ "TARGET_CMOVE"
+ "#")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r,&r,&r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "r,m,r,m")
+ (match_operand 3 "general_operand" "rmi,ri,rmi,ri")])
+ (match_operand:DI 4 "nonimmediate_operand" "ro,ro,0,0")
+ (match_operand:DI 5 "nonimmediate_operand" "0,0,ro,ro")))]
+ "TARGET_CMOVE && GET_MODE_CLASS (GET_MODE (operands[2])) == MODE_INT"
+ "#")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (const_int 0)])
+ (match_operand:DI 3 "nonimmediate_operand" "ro,0")
+ (match_operand:DI 4 "nonimmediate_operand" "0,ro")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0)
+ (match_dup 2))
+ (set (match_dup 0)
+ (if_then_else:DI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 3) (match_dup 4)))]
+ "")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "nonimmediate_operand" "")
+ (match_operand 3 "general_operand" "")])
+ (match_operand:DI 4 "nonimmediate_operand" "ro,0")
+ (match_operand:DI 5 "nonimmediate_operand" "0,ro")))]
+ "TARGET_CMOVE && reload_completed"
+ [(set (cc0) (compare (match_dup 2) (match_dup 3)))
+ (set (match_dup 0)
+ (if_then_else:DI (match_op_dup 1 [(cc0) (const_int 0)])
+ (match_dup 4) (match_dup 5)))]
+ "")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=&r,&r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(cc0) (const_int 0)])
+ (match_operand:DI 2 "nonimmediate_operand" "ro,0")
+ (match_operand:DI 3 "nonimmediate_operand" "0,ro")))]
+ "TARGET_CMOVE && reload_completed"
+ "* return output_int_conditional_move (which_alternative, operands);")
+
+(define_insn "strlensi_unroll"
+ [(set (match_operand:SI 0 "register_operand" "=&r,&r")
+ (unspec:SI [(mem:BLK (match_operand:SI 1 "address_operand" "r,r"))
+ (match_operand:SI 2 "immediate_operand" "i,i")] 0))
+ (clobber (match_scratch:SI 3 "=&q,&r"))]
+ "optimize > 1"
+ "* return output_strlen_unroll (operands);")
+
+;; The only difference between the following patterns is the register preference;
+;; on a Pentium, using a q-register saves one clock cycle per 4 characters.
+
+(define_insn "strlensi_unroll4"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(mem:BLK (match_operand:SI 3 "register_operand" "0,0"))
+ (match_operand:SI 1 "immediate_operand" "i,i")
+ (match_operand:SI 2 "register_operand" "+q,!r")] 0))
+ (clobber (match_dup 2))]
+ "(TARGET_USE_ANY_REG && optimize > 1)"
+ "* return output_strlen_unroll (operands);")
+
+(define_insn "strlensi_unroll5"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(mem:BLK (match_operand:SI 3 "register_operand" "0"))
+ (match_operand:SI 1 "immediate_operand" "i")
+ (match_operand:SI 2 "register_operand" "+q")] 0))
+ (clobber (match_dup 2))]
+ "(TARGET_USE_Q_REG && optimize > 1)"
+ "* return output_strlen_unroll (operands);"
+)
+
+(define_insn "allocate_stack_worker"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "a")] 3)
+ (set (reg:SI 7) (minus:SI (reg:SI 7) (match_dup 0)))
+ (clobber (match_dup 0))]
+ "TARGET_STACK_PROBE"
+ "* return AS1(call,__alloca);")
+
+(define_expand "allocate_stack"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (reg:SI 7) (match_operand:SI 1 "general_operand" "")))
+ (set (reg:SI 7) (minus:SI (reg:SI 7) (match_dup 1)))]
+ "TARGET_STACK_PROBE"
+ "
+{
+#ifdef CHECK_STACK_LIMIT
+ if (GET_CODE (operands[1]) == CONST_INT
+ && INTVAL (operands[1]) < CHECK_STACK_LIMIT)
+ emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ operands[1]));
+ else
+#endif
+ emit_insn (gen_allocate_stack_worker (copy_to_mode_reg (SImode,
+ operands[1])));
+
+ emit_move_insn (operands[0], virtual_stack_dynamic_rtx);
+ DONE;
+}")
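+
+;; A rough sketch of the effect: for a compile-time constant size below
+;; CHECK_STACK_LIMIT the expander simply subtracts the size from %esp,
+;; while larger or variable sizes go through allocate_stack_worker, which
+;; loads the size into %eax and calls __alloca so the stack is probed as
+;; it is extended.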
+
+(define_expand "exception_receiver"
+ [(const_int 0)]
+ "flag_pic"
+ "
+{
+ load_pic_register (1);
+ DONE;
+}")
diff --git a/contrib/gcc/config/i386/isc.h b/contrib/gcc/config/i386/isc.h
index cf8c5f6..5c39896 100644
--- a/contrib/gcc/config/i386/isc.h
+++ b/contrib/gcc/config/i386/isc.h
@@ -19,7 +19,7 @@
#define LIB_SPEC "%{shlib:-lc_s} %{posix:-lcposix} %{Xp:-lcposix} -lc -lg"
#undef CPP_SPEC
-#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{Xp:-D_POSIX_SOURCE}"
+#define CPP_SPEC "%(cpp_cpu) %{posix:-D_POSIX_SOURCE} %{Xp:-D_POSIX_SOURCE}"
/* ISC 2.2 uses `char' for `wchar_t'. */
#undef WCHAR_TYPE
@@ -37,7 +37,7 @@
#undef RETURN_POPS_ARGS
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
- (TREE_CODE (FUNTYPE) == IDENTIFIER_NODE ? 0 \
+ ((FUNDECL) && TREE_CODE (FUNDECL) == IDENTIFIER_NODE ? 0 \
: (TARGET_RTD \
&& (TYPE_ARG_TYPES (FUNTYPE) == 0 \
|| (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
@@ -87,3 +87,7 @@
} \
fputs ("\"\n", FILE); \
} while (0)
+
+/* Work around assembler forward label references generated in exception
+ handling code. */
+#define DWARF2_UNWIND_INFO 0
diff --git a/contrib/gcc/config/i386/linux-aout.h b/contrib/gcc/config/i386/linux-aout.h
index 7e46c68..de81d87 100644
--- a/contrib/gcc/config/i386/linux-aout.h
+++ b/contrib/gcc/config/i386/linux-aout.h
@@ -1,5 +1,5 @@
-/* Definitions for Intel 386 running Linux
- Copyright (C) 1992, 1994, 1995 Free Software Foundation, Inc.
+/* Definitions for Intel 386 running Linux-based GNU systems using a.out.
+ Copyright (C) 1992, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
Contributed by H.J. Lu (hjl@nynexst.com)
This file is part of GNU CC.
@@ -25,17 +25,16 @@ Boston, MA 02111-1307, USA. */
#include <i386/gstabs.h>
#include <linux-aout.h> /* some common stuff */
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START "#"
+
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -Dlinux -Asystem(posix)"
#undef CPP_SPEC
-#if TARGET_CPU_DEFAULT == 2
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{!m386:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#else
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{m486:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#endif
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"
@@ -57,9 +56,8 @@ Boston, MA 02111-1307, USA. */
#if 1
/* We no longer link with libc_p.a or libg.a by default. If you
- * want to profile or debug the Linux C library, please add
- * -lc_p or -ggdb to LDFLAGS at the link time, respectively.
- */
+ want to profile or debug the GNU/Linux C library, please add
+ -lc_p or -ggdb to LDFLAGS at the link time, respectively. */
#define LIB_SPEC \
"%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
#else
diff --git a/contrib/gcc/config/i386/linux-oldld.h b/contrib/gcc/config/i386/linux-oldld.h
index c3066ba..4e3085b 100644
--- a/contrib/gcc/config/i386/linux-oldld.h
+++ b/contrib/gcc/config/i386/linux-oldld.h
@@ -1,5 +1,6 @@
-/* Definitions for Intel 386 running Linux with pre-BFD a.out linkers
- Copyright (C) 1995 Free Software Foundation, Inc.
+/* Definitions for Intel 386 running Linux-based GNU systems with pre-BFD
+ a.out linkers.
+ Copyright (C) 1995, 1997, 1998 Free Software Foundation, Inc.
Contributed by Michael Meissner (meissner@cygnus.com)
This file is part of GNU CC.
@@ -25,17 +26,16 @@ Boston, MA 02111-1307, USA. */
#include <i386/gstabs.h>
#include <linux-aout.h> /* some common stuff */
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START "#"
+
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -Dlinux -Asystem(posix)"
#undef CPP_SPEC
-#if TARGET_CPU_DEFAULT == 2
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{!m386:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#else
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{m486:-D__i486__} %{posix:-D_POSIX_SOURCE}"
-#endif
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
#undef SIZE_TYPE
#define SIZE_TYPE "unsigned int"
@@ -57,9 +57,8 @@ Boston, MA 02111-1307, USA. */
#if 1
/* We no longer link with libc_p.a or libg.a by default. If you
- * want to profile or debug the Linux C library, please add
- * -lc_p or -ggdb to LDFLAGS at the link time, respectively.
- */
+ want to profile or debug the GNU/Linux C library, please add
+   -lc_p or -ggdb to LDFLAGS at the link time, respectively. */
#define LIB_SPEC \
"%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
#else
diff --git a/contrib/gcc/config/i386/linux.h b/contrib/gcc/config/i386/linux.h
index a0b49ba..8e84bb5 100644
--- a/contrib/gcc/config/i386/linux.h
+++ b/contrib/gcc/config/i386/linux.h
@@ -1,5 +1,5 @@
-/* Definitions for Intel 386 running Linux with ELF format
- Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+/* Definitions for Intel 386 running Linux-based GNU systems with ELF format.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
Contributed by Eric Youngdale.
Modified for stabs-in-ELF by H.J. Lu.
@@ -37,16 +37,19 @@ Boston, MA 02111-1307, USA. */
#undef DEFAULT_PCC_STRUCT_RETURN
#define DEFAULT_PCC_STRUCT_RETURN 1
+#undef ASM_COMMENT_START
+#define ASM_COMMENT_START "#"
+
/* This is how to output an element of a case-vector that is relative.
This is only used for PIC code. See comments by the `casesi' insn in
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
-#define JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
/* Copy this from the svr4 specifications... */
/* Define the register numbers to be used in Dwarf debugging information.
@@ -147,44 +150,19 @@ Boston, MA 02111-1307, USA. */
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE BITS_PER_WORD
+/* The egcs-1.1 branch is the last time we will have -Di386.
+   -D__i386__ is the thing to use.  */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-D__ELF__ -Dunix -Di386 -Dlinux -Asystem(unix) -Asystem(posix) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -Di386 -D__i386__ -Dlinux -Asystem(posix)"
#undef CPP_SPEC
#ifdef USE_GNULIBC_1
-#if TARGET_CPU_DEFAULT == 2
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{!m386:-D__i486__} %{posix:-D_POSIX_SOURCE}"
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
#else
-#define CPP_SPEC "%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{m486:-D__i486__} %{posix:-D_POSIX_SOURCE}"
+#define CPP_SPEC "%(cpp_cpu) %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
#endif
-#else /* not USE_GNULIBC_1 */
-#define CPP_SPEC "%(cpp_cpu) %[cpp_cpu] %{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
-#endif /* not USE_GNULIBC_1 */
-#undef LIBGCC_SPEC
-#define LIBGCC_SPEC "-lgcc"
-
-#undef LIB_SPEC
-#ifdef USE_GNULIBC_1
-#if 1
-/* We no longer link with libc_p.a or libg.a by default. If you
- * want to profile or debug the Linux C library, please add
- * -lc_p or -ggdb to LDFLAGS at the link time, respectively.
- */
-#define LIB_SPEC \
- "%{!shared: %{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} \
- %{!ggdb:-lc} %{ggdb:-lg}}"
-#else
-#define LIB_SPEC \
- "%{!shared: \
- %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
- %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
-#endif
-#else
-#define LIB_SPEC \
- "%{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
- %{profile:-lc_p} %{!profile: -lc}}"
-#endif /* not USE_GNULIBC_1 */
+#undef CC1_SPEC
+#define CC1_SPEC "%(cc1_cpu) %{profile:-p}"
/* Provide a LINK_SPEC appropriate for Linux. Here we provide support
for the special GCC options -static and -shared, which allow us to
@@ -221,7 +199,7 @@ Boston, MA 02111-1307, USA. */
%{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.1}} \
%{static:-static}}}"
#endif
-#else /* not USE_GNULIBC_1 */
+#else
#define LINK_SPEC "-m elf_i386 %{shared:-shared} \
%{!shared: \
%{!ibcs: \
@@ -229,7 +207,28 @@ Boston, MA 02111-1307, USA. */
%{rdynamic:-export-dynamic} \
%{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2}} \
%{static:-static}}}"
-#endif /* not USE_GNULIBC_1 */
+#endif
/* Get perform_* macros to build libgcc.a. */
#include "i386/perform.h"
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
+
+ This is used to align code labels according to Intel recommendations. */
+
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ if ((LOG)!=0) \
+ if ((MAX_SKIP)==0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP))
+#endif
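+
+/* For example, with LOG == 4 and MAX_SKIP == 7 the macro above emits
+   ".p2align 4,,7": align to a 16-byte boundary, but only if at most
+   7 padding bytes are needed.  */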
diff --git a/contrib/gcc/config/i386/lynx.h b/contrib/gcc/config/i386/lynx.h
index 4ac00a0..73111f9 100644
--- a/contrib/gcc/config/i386/lynx.h
+++ b/contrib/gcc/config/i386/lynx.h
@@ -1,5 +1,5 @@
/* Definitions for Intel 386 running LynxOS.
- Copyright (C) 1993, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1993, 1995, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -24,12 +24,12 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES "-Dunix -Di386 -DI386 -DLynx -DIBITS32 -Asystem(unix) -Asystem(lynx) -Acpu(i386) -Amachine(i386)"
-/* This is how to output a reference to a user-level label named NAME. */
+/* The prefix to add to user-visible assembler symbols. */
/* Override the svr3 convention of adding a leading underscore. */
-#undef ASM_OUTPUT_LABELREF
-#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "%s", NAME)
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
/* Apparently LynxOS clobbers ebx when you call into the OS. */
diff --git a/contrib/gcc/config/i386/mingw32.h b/contrib/gcc/config/i386/mingw32.h
new file mode 100644
index 0000000..cd4d3e6
--- /dev/null
+++ b/contrib/gcc/config/i386/mingw32.h
@@ -0,0 +1,95 @@
+/* Operating system specific defines to be used when targeting GCC for
+ hosting on Windows32, using GNU tools and the Windows32 API Library,
+ as distinct from winnt.h, which is used to build GCC for use with a
+ windows style library and tool set and uses the Microsoft tools.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Most of this is the same as for Cygwin32, except for changing some
+ specs. */
+
+#include "i386/cygwin32.h"
+
+/* Please keep changes to CPP_PREDEFINES in sync with i386/crtdll.  The
+   only difference between the two should be __MSVCRT__, which is needed
+   to distinguish the MSVCRT runtime from the CRTDLL runtime in the
+   mingw headers.  */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -D_WIN32 -DWIN32 -D__WIN32__ \
+ -D__MINGW32__ -D__MSVCRT__ -DWINNT -D_X86_=1 -D__STDC__=1\
+ -D__stdcall=__attribute__((__stdcall__)) \
+ -D_stdcall=__attribute__((__stdcall__)) \
+ -D__cdecl=__attribute__((__cdecl__)) \
+ -D__declspec(x)=__attribute__((x)) \
+ -Asystem(winnt) -Acpu(i386) -Amachine(i386)"
+
+/* Specify a different directory for the standard include files. */
+#undef STANDARD_INCLUDE_DIR
+#define STANDARD_INCLUDE_DIR "/usr/local/i386-mingw32/include"
+
+#define STANDARD_INCLUDE_COMPONENT "MINGW32"
+
+/* For Windows applications, include more libraries, but always include
+ kernel32. */
+#undef LIB_SPEC
+#define LIB_SPEC \
+"%{mwindows:-luser32 -lgdi32 -lcomdlg32} -lkernel32 -ladvapi32 -lshell32"
+
+/* Link the mingw32 libraries in along with libgcc.  */
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC "-lmingw32 -lgcc -lmoldname -lmsvcrt"
+
+/* Specify a different entry point when linking a DLL */
+#undef LINK_SPEC
+#define LINK_SPEC \
+"%{mwindows:--subsystem windows} %{mdll:--dll -e _DllMainCRTStartup@12}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{mdll:dllcrt2%O%s} %{!mdll:crt2%O%s}"
+
+#define MATH_LIBRARY "-lmsvcrt"
+
+/* Output STRING, a string representing a filename, to FILE. We canonicalize
+ it to be in MS-DOS format. */
+#define OUTPUT_QUOTED_STRING(FILE, STRING) \
+do { \
+  char c; \
+  const char *string = (STRING); \
+  putc ('\"', (FILE)); \
+  if (string[1] == ':' \
+      && (string[2] == '/' || string[2] == '\\')) \
+    { \
+      putc ('/', (FILE)); \
+      putc ('/', (FILE)); \
+      putc (*string, (FILE)); \
+      string += 2; \
+    } \
+ \
+  while ((c = *string++) != 0) \
+    { \
+      if (c == '\\') \
+        c = '/'; \
+ \
+      if (c == '\"') \
+        putc ('\\', (FILE)); \
+      putc (c, (FILE)); \
+    } \
+ \
+  putc ('\"', (FILE)); \
+} while (0)
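+
+/* For example, the name C:\foo\bar.c is emitted (quotes included) as
+   "//C/foo/bar.c": the drive letter becomes a //C prefix, backslashes
+   turn into forward slashes, and embedded double quotes are escaped.  */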
+
diff --git a/contrib/gcc/config/i386/moss.h b/contrib/gcc/config/i386/moss.h
new file mode 100644
index 0000000..dadf3d8
--- /dev/null
+++ b/contrib/gcc/config/i386/moss.h
@@ -0,0 +1,34 @@
+/* Definitions for Intel 386 running MOSS
+ Copyright (C) 1996 Free Software Foundation, Inc.
+ Contributed by Bryan Ford <baford@cs.utah.edu>
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
+
+/* I believe in reuse... */
+#include "i386/linux.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__ELF__ -Di386 -Dmoss -Asystem(posix) -Acpu(i386) -Amachine(i386)"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtn.o%s"
+
+#undef LINK_SPEC
+
diff --git a/contrib/gcc/config/i386/netbsd.h b/contrib/gcc/config/i386/netbsd.h
index 180ff35..5978aec 100644
--- a/contrib/gcc/config/i386/netbsd.h
+++ b/contrib/gcc/config/i386/netbsd.h
@@ -30,13 +30,6 @@
#undef WCHAR_TYPE_SIZE
#define WCHAR_TYPE_SIZE 32
-#define HANDLE_SYSV_PRAGMA
-
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
@@ -51,18 +44,25 @@
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
-#define JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
/* Don't default to pcc-struct-return, because gcc is the only compiler, and
we want to retain compatibility with older gcc versions. */
#define DEFAULT_PCC_STRUCT_RETURN 0
+/* i386 NetBSD still uses old binutils that don't insert nops by default
+   when the .align directive calls for extra space in the text
+   segment.  */
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d,0x90\n", (LOG))
+
/* Profiling routines, partially copied from i386/osfrose.h. */
/* Redefine this to use %eax instead of %edx. */
@@ -78,3 +78,8 @@
fprintf (FILE, "\tcall mcount\n"); \
} \
}
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#define DWARF2_UNWIND_INFO 0
+
diff --git a/contrib/gcc/config/i386/next.h b/contrib/gcc/config/i386/next.h
index c0d6d72..65f7402 100644
--- a/contrib/gcc/config/i386/next.h
+++ b/contrib/gcc/config/i386/next.h
@@ -1,5 +1,5 @@
/* Target definitions for GNU compiler for Intel x86 CPU running NeXTSTEP
- Copyright (C) 1993, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1993, 1995, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -124,8 +124,8 @@ Boston, MA 02111-1307, USA. */
GAS requires the %cl argument, so override unx386.h. */
-#undef AS3_SHIFT_DOUBLE
-#define AS3_SHIFT_DOUBLE(a,b,c,d) AS3 (a,b,c,d)
+#undef SHIFT_DOUBLE_OMITS_COUNT
+#define SHIFT_DOUBLE_OMITS_COUNT 0
/* Print opcodes the way that GAS expects them. */
#define GAS_MNEMONICS 1
@@ -216,7 +216,7 @@ Boston, MA 02111-1307, USA. */
#undef RETURN_POPS_ARGS
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
- (TREE_CODE (FUNTYPE) == IDENTIFIER_NODE \
+ ((FUNDECL) && TREE_CODE (FUNDECL) == IDENTIFIER_NODE \
? 0 \
: (TARGET_RTD \
&& (TYPE_ARG_TYPES (FUNTYPE) == 0 \
@@ -224,3 +224,10 @@ Boston, MA 02111-1307, USA. */
== void_type_node))) ? (SIZE) : 0)
/* END Calling Convention CHANGES */
+
+/* NeXT still uses old binutils that don't insert nops by default
+   when the .align directive calls for extra space in the text
+   segment.  */
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d,0x90\n", (LOG))
diff --git a/contrib/gcc/config/i386/openbsd.h b/contrib/gcc/config/i386/openbsd.h
new file mode 100644
index 0000000..1b56262
--- /dev/null
+++ b/contrib/gcc/config/i386/openbsd.h
@@ -0,0 +1,130 @@
+/* Configuration for an OpenBSD i386 target.
+
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* This is tested by i386gas.h. */
+#define YES_UNDERSCORES
+
+#include <i386/gstabs.h>
+
+/* Get perform_* macros to build libgcc.a. */
+#include <i386/perform.h>
+
+/* Get generic OpenBSD definitions. */
+#define OBSD_OLD_GAS
+#include <openbsd.h>
+
+/* Run-time target specifications */
+#define CPP_PREDEFINES "-D__unix__ -D__i386__ -D__OpenBSD__ -Asystem(unix) -Asystem(OpenBSD) -Acpu(i386) -Amachine(i386)"
+
+/* Layout of source language data types. */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Assembler format: overall framework. */
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "#APP\n"
+
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "#NO_APP\n"
+
+/* The following macros were originally stolen from i386v4.h.
+ These have to be defined to get PIC code correct. */
+
+/* Assembler format: dispatch tables. */
+
+/* How to output an element of a case-vector that is relative.
+ This is only used for PIC code. See comments by the `casesi' insn in
+ i386.md for an explanation of the expression this outputs. */
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
+
+/* Assembler format: sections. */
+
+/* Indicate when jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* Stack & calling: aggregate returns. */
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Assembler format: alignment output. */
+
+/* Kludgy test: when gas is upgraded, it will have p2align, and no problems
+ with nops. */
+#ifndef HAVE_GAS_MAX_SKIP_P2ALIGN
+/* i386 OpenBSD still uses an older gas that doesn't insert nops by default
+   when the .align directive calls for extra space in the text
+   segment.  */
+#undef ASM_OUTPUT_ALIGN
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG)!=0) fprintf ((FILE), "\t.align %d,0x90\n", (LOG))
+#endif
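+
+/* For example, ASM_OUTPUT_ALIGN (file, 4) emits ".align 4,0x90", so any
+   padding inserted in the text segment is nop (0x90) bytes rather than
+   zeros.  */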
+
+/* Stack & calling: profiling. */
+
+/* OpenBSD's profiler recovers all information from the stack pointer.
+ The icky part is not here, but in machine/profile.h. */
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fputs (flag_pic ? "\tcall mcount@PLT\n": "\tcall mcount\n", FILE);
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. egcs doesn't try too hard to check internal
+ configuration files... */
+#define DWARF2_UNWIND_INFO 0
+
+/* Assembler format: alignment output. */
+
+/* A C statement to output to the stdio stream FILE an assembler
+ command to advance the location counter to a multiple of 1<<LOG
+ bytes if it is within MAX_SKIP bytes.
+
+ This will be used to align code labels according to Intel
+   recommendations, in anticipation of a binutils upgrade.  */
+#ifdef HAVE_GAS_MAX_SKIP_P2ALIGN
+#define ASM_OUTPUT_MAX_SKIP_ALIGN(FILE,LOG,MAX_SKIP) \
+ do { \
+ if ((LOG) != 0) \
+ if ((MAX_SKIP) == 0) fprintf ((FILE), "\t.p2align %d\n", (LOG)); \
+ else fprintf ((FILE), "\t.p2align %d,,%d\n", (LOG), (MAX_SKIP)); \
+ } while (0)
+#endif
+
+/* Note that we pick up ASM_OUTPUT_MI_THUNK from unix.h. */
+
diff --git a/contrib/gcc/config/i386/osf1-ci.asm b/contrib/gcc/config/i386/osf1-ci.asm
new file mode 100644
index 0000000..a0f0773
--- /dev/null
+++ b/contrib/gcc/config/i386/osf1-ci.asm
@@ -0,0 +1,65 @@
+! crti.s for OSF/1, x86; derived from sol2-ci.asm.
+
+! Copyright (C) 1993, 1998 Free Software Foundation, Inc.
+! Written By Fred Fish, Nov 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just supplies labeled starting points for the .init and .fini
+! sections. It is linked in before the values-Xx.o files and also before
+! crtbegin.o.
+
+ .file "crti.s"
+ .ident "GNU C crti.s"
+
+ .section .init
+ .globl _init
+ .type _init,@function
+_init:
+
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+_fini:
+
+.globl _init_init_routine
+.data
+ .align 4
+ .type _init_init_routine,@object
+ .size _init_init_routine,4
+_init_init_routine:
+ .long _init
+.globl _init_fini_routine
+ .align 4
+ .type _init_fini_routine,@object
+ .size _init_fini_routine,4
+_init_fini_routine:
+ .long _fini
diff --git a/contrib/gcc/config/i386/osf1-cn.asm b/contrib/gcc/config/i386/osf1-cn.asm
new file mode 100644
index 0000000..a10298f
--- /dev/null
+++ b/contrib/gcc/config/i386/osf1-cn.asm
@@ -0,0 +1,46 @@
+! crtn.s for OSF/1, x86; derived from sol2-cn.asm.
+
+! Copyright (C) 1993, 1998 Free Software Foundation, Inc.
+! Written By Fred Fish, Nov 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just supplies returns for the .init and .fini sections. It is
+! linked in after all other files.
+
+ .file "crtn.o"
+ .ident "GNU C crtn.o"
+
+ .section .init
+ ret $0x0
+
+ .section .fini
+ ret $0x0
diff --git a/contrib/gcc/config/i386/osf1elf.h b/contrib/gcc/config/i386/osf1elf.h
new file mode 100644
index 0000000..da61e8b
--- /dev/null
+++ b/contrib/gcc/config/i386/osf1elf.h
@@ -0,0 +1,260 @@
+/* OSF/1 1.3 is now compatible with SVR4, so include sysv4.h and
+   put the differences here.  */
+
+#include <stdio.h>
+#include "i386/sysv4.h" /* Base i386 target machine definitions */
+#define _sys_siglist sys_siglist
+extern char *sys_siglist[];
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 OSF/1)");
+
+/* The WORD_SWITCH_TAKES_ARG defined in svr4 is not correct for us; we
+   also need an extra -soname.  */
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp (STR, "Tdata") || !strcmp (STR, "Ttext") \
+ || !strcmp (STR, "Tbss") || !strcmp (STR, "soname"))
+
+/* Note, -fpic and -fPIC are equivalent */
+#undef CPP_SPEC
+#define CPP_SPEC "\
+%{fpic: -D__SHARED__} %{fPIC: %{!fpic: -D__SHARED__}} \
+%{.S: %{!ansi:%{!traditional:%{!traditional-cpp:%{!ftraditional: -traditional}}}}} \
+%{.S: -D__LANGUAGE_ASSEMBLY %{!ansi:-DLANGUAGE_ASSEMBLY}} \
+%{.cc: -D__LANGUAGE_C_PLUS_PLUS} \
+%{.cxx: -D__LANGUAGE_C_PLUS_PLUS} \
+%{.C: -D__LANGUAGE_C_PLUS_PLUS} \
+%{.m: -D__LANGUAGE_OBJECTIVE_C} \
+%{!.S: -D__LANGUAGE_C %{!ansi:-DLANGUAGE_C}}"
+
+/* -mmcount or -mno-mcount should be used with -pg or -p */
+#undef CC1_SPEC
+#define CC1_SPEC "%{p: %{!mmcount: %{!mno-mcount: -mno-mcount }}} \
+%{!p: %{pg: %{!mmcount: %{!mno-mcount: -mno-mcount }}}}"
+
+/* Note, -D__NO_UNDERSCORES__ -D__ELF__ are provided in the older version of
+ OSF/1 gcc. We keep them here, so that old /usr/include/i386/asm.h works.
+ */
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-D__NO_UNDERSCORES__ -D__ELF__ -DOSF -DOSF1 -Di386 -Dunix -Asystem(xpg4) -Asystem(osf1) -Acpu(i386) -Amachine(i386)"
+
+/* Current OSF/1 doesn't provide separate crti.o and gcrti.o (nor crtn.o
+   and gcrtn.o) for profiling.  */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}}}\
+ crti.o%s \
+ crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{v*: -v}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{v*: -v} \
+ %{h*} %{z*} \
+ %{dy:-call_shared} %{dn:-static} \
+ %{static:-static} \
+ %{shared:-shared} \
+ %{call_shared:-call_shared} \
+ %{symbolic:-Bsymbolic -shared -call_shared} \
+ %{!dy: %{!dn: %{!static: %{!shared: %{!symbolic: \
+ %{noshrlib: -static } \
+ %{!noshrlib: -call_shared}}}}}}"
+
+#undef MD_EXEC_PREFIX
+#define MD_EXEC_PREFIX "/usr/ccs/gcc/"
+
+#undef MD_STARTFILE_PREFIX
+#define MD_STARTFILE_PREFIX "/usr/ccs/lib/"
+
+/* Define this macro, meaning that gcc should find the library 'libgcc.a'
+   by hand, rather than passing the argument '-lgcc' to tell the linker
+   to do the search.  */
+#define LINK_LIBGCC_SPECIAL
+
+/* This goes with LINK_LIBGCC_SPECIAL; we need to pass libgcc.a differently.  */
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC "%{!shared:%{!symbolic:libgcc.a%s}}"
+
+/* A C statement to output assembler commands which will identify the object
+   file as having been compiled with GNU CC.  We don't need or want this for
+ OSF1. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE)
+
+/* Identify the front-end which produced this file. To keep symbol
+ space down, and not confuse kdb, only do this if the language is
+ not C. */
+#define ASM_IDENTIFY_LANGUAGE(STREAM) \
+{ \
+ if (strcmp (lang_identify (), "c") != 0) \
+ output_lang_identify (STREAM); \
+}
+
+/* Specify size_t, ptrdiff_t, and wchar_t types. */
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+
+#define SIZE_TYPE "long unsigned int"
+#define PTRDIFF_TYPE "int"
+#define WCHAR_TYPE "unsigned int"
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* Turn off long double being 96 bits. */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+/* Work with OSF/1 profile */
+#define MASK_NO_MCOUNT 000200000000 /* profiling uses mcount_ptr */
+
+#define TARGET_MCOUNT ((target_flags & MASK_NO_MCOUNT) == 0)
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ { "mcount", -MASK_NO_MCOUNT}, \
+ { "no-mcount", MASK_NO_MCOUNT},
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used.
+
+ We override it here to allow for the new profiling code to go before
+ the prologue and the old mcount code to go after the prologue (and
+ after %ebx has been set up for ELF shared library support). */
+#if 0
+#define OSF_PROFILE_BEFORE_PROLOGUE \
+ (!TARGET_MCOUNT \
+ && !current_function_needs_context \
+ && (!flag_pic \
+ || !frame_pointer_needed \
+ || (!current_function_uses_pic_offset_table \
+ && !current_function_uses_const_pool)))
+#else
+#define OSF_PROFILE_BEFORE_PROLOGUE 0
+#endif
+#undef FUNCTION_PROLOGUE
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+do \
+ { \
+ char *prefix = ""; \
+ char *lprefix = LPREFIX; \
+ int labelno = profile_label_no; \
+ \
+ if (profile_flag && OSF_PROFILE_BEFORE_PROLOGUE) \
+ { \
+ if (!flag_pic) \
+ { \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", lprefix, labelno); \
+ fprintf (FILE, "\tcall *%s_mcount_ptr\n", prefix); \
+ } \
+ \
+ else \
+ { \
+ static int call_no = 0; \
+ \
+ fprintf (FILE, "\tcall %sPc%d\n", lprefix, call_no); \
+ fprintf (FILE, "%sPc%d:\tpopl %%eax\n", lprefix, call_no); \
+ fprintf (FILE, "\taddl $_GLOBAL_OFFSET_TABLE_+[.-%sPc%d],%%eax\n", \
+ lprefix, call_no++); \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%eax),%%edx\n", \
+ lprefix, labelno); \
+ fprintf (FILE, "\tmovl %s_mcount_ptr@GOT(%%eax),%%eax\n", \
+ prefix); \
+ fprintf (FILE, "\tcall *(%%eax)\n"); \
+ } \
+ } \
+ \
+ function_prologue (FILE, SIZE); \
+ } \
+while (0)
+
+/* A C statement or compound statement to output to FILE some assembler code to
+ call the profiling subroutine `mcount'. Before calling, the assembler code
+ must load the address of a counter variable into a register where `mcount'
+ expects to find the address. The name of this variable is `LP' followed by
+ the number LABELNO, so you would generate the name using `LP%d' in a
+ `fprintf'.
+
+ The details of how the address should be passed to `mcount' are determined
+ by your operating system environment, not by GNU CC. To figure them out,
+ compile a small program for profiling using the system's installed C
+ compiler and look at the assembler code that results. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+do \
+ { \
+ if (!OSF_PROFILE_BEFORE_PROLOGUE) \
+ { \
+ char *prefix = ""; \
+ char *lprefix = LPREFIX; \
+ int labelno = LABELNO; \
+ \
+ /* Note that OSF/rose blew it in terms of calling mcount, \
+ since OSF/rose prepends a leading underscore, but mcount's \
+ doesn't. At present, we keep this kludge for ELF as well \
+ to allow old kernels to build profiling. */ \
+ \
+ if (flag_pic \
+ && !current_function_uses_pic_offset_table \
+ && !current_function_uses_const_pool) \
+ abort (); \
+ \
+ if (TARGET_MCOUNT && flag_pic) \
+ { \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
+ lprefix, labelno); \
+ fprintf (FILE, "\tcall *%smcount@GOT(%%ebx)\n", prefix); \
+ } \
+ \
+ else if (TARGET_MCOUNT) \
+ { \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", lprefix, labelno); \
+ fprintf (FILE, "\tcall %smcount\n", prefix); \
+ } \
+ \
+ else if (flag_pic && frame_pointer_needed) \
+ { \
+ fprintf (FILE, "\tmovl 4(%%ebp),%%ecx\n"); \
+ fprintf (FILE, "\tpushl %%ecx\n"); \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
+ lprefix, labelno); \
+ fprintf (FILE, "\tmovl _mcount_ptr@GOT(%%ebx),%%eax\n"); \
+ fprintf (FILE, "\tcall *(%%eax)\n"); \
+ fprintf (FILE, "\tpopl %%eax\n"); \
+ } \
+ \
+ else if (frame_pointer_needed) \
+ { \
+ fprintf (FILE, "\tmovl 4(%%ebp),%%ecx\n"); \
+ fprintf (FILE, "\tpushl %%ecx\n"); \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", lprefix, labelno); \
+ fprintf (FILE, "\tcall *_mcount_ptr\n"); \
+ fprintf (FILE, "\tpopl %%eax\n"); \
+ } \
+ \
+ else \
+ abort (); \
+ } \
+ } \
+while (0)
+
+#if defined (CROSS_COMPILE) && defined (HOST_BITS_PER_INT) && defined (HOST_BITS_PER_LONG) && defined (HOST_BITS_PER_LONGLONG)
+#if (HOST_BITS_PER_INT==32) && (HOST_BITS_PER_LONG==64) && (HOST_BITS_PER_LONGLONG==64)
+#define REAL_ARITHMETIC
+#endif
+#endif
diff --git a/contrib/gcc/config/i386/osf1elfgdb.h b/contrib/gcc/config/i386/osf1elfgdb.h
new file mode 100644
index 0000000..af6efa2
--- /dev/null
+++ b/contrib/gcc/config/i386/osf1elfgdb.h
@@ -0,0 +1,7 @@
+/* Target definitions for GNU compiler for Intel 80386 running OSF/1 1.3+
+ with gas and gdb. */
+
+/* Use stabs instead of DWARF debug format. */
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "i386/osf1elf.h"
diff --git a/contrib/gcc/config/i386/osfelf.h b/contrib/gcc/config/i386/osfelf.h
index 7e71fe9..381ffc2 100644
--- a/contrib/gcc/config/i386/osfelf.h
+++ b/contrib/gcc/config/i386/osfelf.h
@@ -22,10 +22,10 @@ Boston, MA 02111-1307, USA. */
#include "config/i386/osfrose.h"
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-DOSF -DOSF1 -Dunix -Di386 -Asystem(unix) -Asystem(xpg4) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-DOSF -DOSF1 -Dunix -Asystem(xpg4)"
#undef CPP_SPEC
-#define CPP_SPEC "\
+#define CPP_SPEC "%(cpp_cpu) \
%{mrose: -D__ROSE__ %{!pic-none: -D__SHARED__}} \
%{!mrose: -D__ELF__ %{fpic: -D__SHARED__}} \
%{mno-underscores: -D__NO_UNDERSCORES__} \
diff --git a/contrib/gcc/config/i386/osfrose.h b/contrib/gcc/config/i386/osfrose.h
index 3aae1e1..9cfe187 100644
--- a/contrib/gcc/config/i386/osfrose.h
+++ b/contrib/gcc/config/i386/osfrose.h
@@ -1,6 +1,6 @@
/* Definitions of target machine for GNU compiler.
Intel 386 (OSF/1 with OSF/rose) version.
- Copyright (C) 1991, 1992, 1993 Free Software Foundation, Inc.
+ Copyright (C) 1991, 1992, 1993, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -36,16 +36,7 @@ Boston, MA 02111-1307, USA. */
-z* options (for the linker). */
#define SWITCH_TAKES_ARG(CHAR) \
- ( (CHAR) == 'D' \
- || (CHAR) == 'U' \
- || (CHAR) == 'o' \
- || (CHAR) == 'e' \
- || (CHAR) == 'T' \
- || (CHAR) == 'u' \
- || (CHAR) == 'I' \
- || (CHAR) == 'm' \
- || (CHAR) == 'L' \
- || (CHAR) == 'A' \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
|| (CHAR) == 'h' \
|| (CHAR) == 'z')
@@ -99,10 +90,10 @@ Boston, MA 02111-1307, USA. */
/* Change default predefines. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-DOSF -DOSF1 -Dunix -Di386 -Asystem(unix) -Asystem(xpg4) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-DOSF -DOSF1 -Dunix -Asystem(xpg4)"
#undef CPP_SPEC
-#define CPP_SPEC "\
+#define CPP_SPEC "%(cpp_cpu) \
%{!melf: -D__ROSE__ %{!pic-none: -D__SHARED__}} \
%{melf: -D__ELF__ %{fpic: -D__SHARED__}} \
%{mno-underscores: -D__NO_UNDERSCORES__} \
@@ -373,6 +364,12 @@ while (0)
fprintf (FILE, "%s%s%d:\n", (TARGET_UNDERSCORES) ? "" : ".", \
PREFIX, NUM)
+/* The prefix to add to user-visible assembler symbols. */
+
+/* target_flags is not accessible by the preprocessor */
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
/* This is how to output a reference to a user-level label named NAME. */
#undef ASM_OUTPUT_LABELREF
@@ -384,7 +381,7 @@ while (0)
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Output a definition */
@@ -407,10 +404,9 @@ while (0)
alignment to be done at such a time. Most machine descriptions do
not currently define the macro. */
-#undef ASM_OUTPUT_ALIGN_CODE
-#define ASM_OUTPUT_ALIGN_CODE(STREAM) \
- fprintf (STREAM, "\t.align\t%d\n", \
- (!TARGET_LARGE_ALIGN && i386_align_jumps > 2) ? 2 : i386_align_jumps)
+#undef LABEL_ALIGN_AFTER_BARRIER
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) \
+ ((!TARGET_LARGE_ALIGN && i386_align_jumps > 2) ? 2 : i386_align_jumps)
/* A C expression to output text to align the location counter in the
way that is desirable at the beginning of a loop.
@@ -419,9 +415,8 @@ while (0)
alignment to be done at such a time. Most machine descriptions do
not currently define the macro. */
-#undef ASM_OUTPUT_LOOP_ALIGN
-#define ASM_OUTPUT_LOOP_ALIGN(STREAM) \
- fprintf (STREAM, "\t.align\t%d\n", i386_align_loops)
+#undef LOOP_ALIGN
+#define LOOP_ALIGN(LABEL) (i386_align_loops)
/* A C statement to output to the stdio stream STREAM an assembler
command to advance the location counter to a multiple of 2 to the
diff --git a/contrib/gcc/config/i386/ptx4-i.h b/contrib/gcc/config/i386/ptx4-i.h
new file mode 100644
index 0000000..1537b4a
--- /dev/null
+++ b/contrib/gcc/config/i386/ptx4-i.h
@@ -0,0 +1,247 @@
+/* Target definitions for GNU compiler for Intel 80386 running Dynix/ptx v4
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+ Modified from sysv4.h
+ Originally written by Ron Guilmette (rfg@netcom.com).
+ Modified by Tim Wright (timw@sequent.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "i386/i386.h" /* Base i386 target machine definitions */
+#include "i386/att.h" /* Use the i386 AT&T assembler syntax */
+#include "ptx4.h" /* Rest of definitions (non architecture dependent) */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 Sequent Dynix/ptx Version 4)");
+
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+
+#undef RETURN_IN_MEMORY
+#define RETURN_IN_MEMORY(TYPE) \
+ (TYPE_MODE (TYPE) == BLKmode)
+
+/* Define which macros to predefine. _SEQUENT_ is our extension. */
+/* This used to define X86, but james@bigtex.cactus.org says that
+ is supposed to be defined optionally by user programs--not by default. */
+#define CPP_PREDEFINES \
+ "-Di386 -Dunix -D_SEQUENT_ -Asystem(unix) -Asystem(ptx4) -Acpu(i386) -Amachine(i386)"
+
+/* This is how to output assembly code to define a `float' constant.
+ We always have to use a .long pseudo-op to do this because the native
+ SVR4 ELF assembler is buggy and it generates incorrect values when we
+ try to use the .float pseudo-op instead. */
+
+#undef ASM_OUTPUT_FLOAT
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+do { long value; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), value); \
+ if (sizeof (int) == sizeof (long)) \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value); \
+ else \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value); \
+ } while (0)
+
+/* This is how to output assembly code to define a `double' constant.
+ We always have to use a pair of .long pseudo-ops to do this because
+ the native SVR4 ELF assembler is buggy and it generates incorrect
+ values when we try to use the .double pseudo-op instead. */
+
+#undef ASM_OUTPUT_DOUBLE
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+do { long value[2]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), value); \
+ if (sizeof (int) == sizeof (long)) \
+ { \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value[0]); \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value[1]); \
+ } \
+ else \
+ { \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value[0]); \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value[1]); \
+ } \
+ } while (0)
+
+
+#undef ASM_OUTPUT_LONG_DOUBLE
+#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) \
+do { long value[3]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE ((VALUE), value); \
+ if (sizeof (int) == sizeof (long)) \
+ { \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value[0]); \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value[1]); \
+ fprintf((FILE), "%s\t0x%x\n", ASM_LONG, value[2]); \
+ } \
+ else \
+ { \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value[0]); \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value[1]); \
+ fprintf((FILE), "%s\t0x%lx\n", ASM_LONG, value[2]); \
+ } \
+ } while (0)
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+
+ 0 for %eax (gnu regno = 0)
+ 1 for %ecx (gnu regno = 2)
+ 2 for %edx (gnu regno = 1)
+ 3 for %ebx (gnu regno = 3)
+ 4 for %esp (gnu regno = 7)
+ 5 for %ebp (gnu regno = 6)
+ 6 for %esi (gnu regno = 4)
+ 7 for %edi (gnu regno = 5)
+
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+
+ 8 for %eip (no gnu equivalent)
+ 9 for %eflags (no gnu equivalent)
+ 10 for %trapno (no gnu equivalent)
+
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+ particular, and even for DWARF regno 11, SDB only seems to under-
+ stand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+
+ 11 for %st(0) (gnu regno = 8)
+ 12 for %st(1) (gnu regno = 9)
+ 13 for %st(2) (gnu regno = 10)
+ 14 for %st(3) (gnu regno = 11)
+ 15 for %st(4) (gnu regno = 12)
+ 16 for %st(5) (gnu regno = 13)
+ 17 for %st(6) (gnu regno = 14)
+ 18 for %st(7) (gnu regno = 15)
+*/
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1))
+
+/* The routine used to output sequences of byte values. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable. Note that if we find subparts of the
+ character sequence which end with NUL (and which are shorter than
+ STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
+ do \
+ { \
+ register unsigned char *_ascii_bytes = (unsigned char *) (STR); \
+ register unsigned char *limit = _ascii_bytes + (LENGTH); \
+ register unsigned bytes_in_chunk = 0; \
+ for (; _ascii_bytes < limit; _ascii_bytes++) \
+ { \
+ register unsigned char *p; \
+ if (bytes_in_chunk >= 64) \
+ { \
+ fputc ('\n', (FILE)); \
+ bytes_in_chunk = 0; \
+ } \
+ for (p = _ascii_bytes; p < limit && *p != '\0'; p++) \
+ continue; \
+ if (p < limit && (p - _ascii_bytes) <= STRING_LIMIT) \
+ { \
+ if (bytes_in_chunk > 0) \
+ { \
+ fputc ('\n', (FILE)); \
+ bytes_in_chunk = 0; \
+ } \
+ ASM_OUTPUT_LIMITED_STRING ((FILE), _ascii_bytes); \
+ _ascii_bytes = p; \
+ } \
+ else \
+ { \
+ if (bytes_in_chunk == 0) \
+ fprintf ((FILE), "\t.byte\t"); \
+ else \
+ fputc (',', (FILE)); \
+ fprintf ((FILE), "0x%02x", *_ascii_bytes); \
+ bytes_in_chunk += 5; \
+ } \
+ } \
+ if (bytes_in_chunk > 0) \
+ fprintf ((FILE), "\n"); \
+ } \
+ while (0)
+
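+/* Roughly speaking, a NUL-terminated run of at most STRING_LIMIT bytes is
+   handed to ASM_OUTPUT_LIMITED_STRING as a single string directive, while
+   anything else is dumped as comma-separated ".byte 0x.." values, broken
+   into lines of about a dozen bytes.  */
+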
+/* This is how to output an element of a case-vector that is relative.
+ This is only used for PIC code. See comments by the `casesi' insn in
+ i386.md for an explanation of the expression this outputs. */
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+
+#define JUMP_TABLES_IN_TEXT_SECTION 1
diff --git a/contrib/gcc/config/i386/rtems.h b/contrib/gcc/config/i386/rtems.h
new file mode 100644
index 0000000..60e6dc7
--- /dev/null
+++ b/contrib/gcc/config/i386/rtems.h
@@ -0,0 +1,34 @@
+/* Definitions for rtems targeting an Intel i386 using coff.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "i386/i386-coff.h"
+
+/* Specify predefined symbols in preprocessor. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -Drtems -D__rtems__ \
+ -Asystem(rtems) -Acpu(i386) -Amachine(i386)"
+
+/* Generate calls to memcpy, memcmp and memset. */
+#ifndef TARGET_MEM_FUNCTIONS
+#define TARGET_MEM_FUNCTIONS
+#endif
+
diff --git a/contrib/gcc/config/i386/rtemself.h b/contrib/gcc/config/i386/rtemself.h
new file mode 100644
index 0000000..d9d9733
--- /dev/null
+++ b/contrib/gcc/config/i386/rtemself.h
@@ -0,0 +1,169 @@
+/* Definitions for Intel 386 running Linux-based GNU systems with ELF format.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Eric Youngdale.
+ Modified for stabs-in-ELF by H.J. Lu.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define LINUX_DEFAULT_ELF
+
+/* A lie, I guess, but the general idea behind linux/ELF is that we are
+ supposed to be outputting something that will assemble under SVr4.
+ This gets us pretty close. */
+#include <i386/i386.h> /* Base i386 target machine definitions */
+#include <i386/att.h> /* Use the i386 AT&T assembler syntax */
+#include <linux.h> /* some common stuff */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386 RTEMS with ELF)");
+
+/* The svr4 ABI for the i386 says that records and unions are returned
+ in memory. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 1
+
+/* This is how to output an element of a case-vector that is relative.
+ This is only used for PIC code. See comments by the `casesi' insn in
+ i386.md for an explanation of the expression this outputs. */
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
+
+/* Indicate that jump tables go in the text section. This is
+ necessary when compiling PIC code. */
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* Copy this from the svr4 specifications... */
+/* Define the register numbers to be used in Dwarf debugging information.
+ The SVR4 reference port C compiler uses the following register numbers
+ in its Dwarf output code:
+ 0 for %eax (gnu regno = 0)
+ 1 for %ecx (gnu regno = 2)
+ 2 for %edx (gnu regno = 1)
+ 3 for %ebx (gnu regno = 3)
+ 4 for %esp (gnu regno = 7)
+ 5 for %ebp (gnu regno = 6)
+ 6 for %esi (gnu regno = 4)
+ 7 for %edi (gnu regno = 5)
+ The following three DWARF register numbers are never generated by
+ the SVR4 C compiler or by the GNU compilers, but SDB on x86/svr4
+ believes these numbers have these meanings.
+ 8 for %eip (no gnu equivalent)
+ 9 for %eflags (no gnu equivalent)
+ 10 for %trapno (no gnu equivalent)
+ It is not at all clear how we should number the FP stack registers
+ for the x86 architecture. If the version of SDB on x86/svr4 were
+ a bit less brain dead with respect to floating-point then we would
+ have a precedent to follow with respect to DWARF register numbers
+ for x86 FP registers, but the SDB on x86/svr4 is so completely
+ broken with respect to FP registers that it is hardly worth thinking
+ of it as something to strive for compatibility with.
+ The version of x86/svr4 SDB I have at the moment does (partially)
+ seem to believe that DWARF register number 11 is associated with
+ the x86 register %st(0), but that's about all. Higher DWARF
+ register numbers don't seem to be associated with anything in
+ particular, and even for DWARF regno 11, SDB only seems to under-
+ stand that it should say that a variable lives in %st(0) (when
+ asked via an `=' command) if we said it was in DWARF regno 11,
+ but SDB still prints garbage when asked for the value of the
+ variable in question (via a `/' command).
+ (Also note that the labels SDB prints for various FP stack regs
+ when doing an `x' command are all wrong.)
+ Note that these problems generally don't affect the native SVR4
+ C compiler because it doesn't allow the use of -O with -g and
+ because when it is *not* optimizing, it allocates a memory
+ location for each floating-point variable, and the memory
+ location is what gets described in the DWARF AT_location
+ attribute for the variable in question.
+ Regardless of the severe mental illness of the x86/svr4 SDB, we
+ do something sensible here and we use the following DWARF
+ register numbers. Note that these are all stack-top-relative
+ numbers.
+ 11 for %st(0) (gnu regno = 8)
+ 12 for %st(1) (gnu regno = 9)
+ 13 for %st(2) (gnu regno = 10)
+ 14 for %st(3) (gnu regno = 11)
+ 15 for %st(4) (gnu regno = 12)
+ 16 for %st(5) (gnu regno = 13)
+ 17 for %st(6) (gnu regno = 14)
+ 18 for %st(7) (gnu regno = 15)
+*/
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1))
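The DBX_REGISTER_NUMBER definition above is just the SVR4 DWARF numbering from the comment written as a conditional chain. A small self-contained sketch of the same mapping, assuming the usual i386 values FIRST_STACK_REG == 8 and LAST_STACK_REG == 15 (those constants are not defined in this file, so treat them as assumptions):

#include <stdio.h>

/* Sketch only: mirrors the gcc-regno -> DWARF-regno mapping encoded by
   DBX_REGISTER_NUMBER above.  */

static int
dwarf_regno (int n)
{
  static const int int_regs[8] = { 0, 2, 1, 3, 6, 7, 5, 4 };

  if (n >= 0 && n < 8)
    return int_regs[n];         /* %eax %edx %ecx %ebx %esi %edi %ebp %esp */
  if (n >= 8 && n <= 15)        /* assumed FIRST_STACK_REG..LAST_STACK_REG */
    return n + 3;               /* %st(0)..%st(7) -> 11..18 */
  return -1;
}

int
main (void)
{
  int n;

  for (n = 0; n <= 15; n++)
    printf ("gcc regno %2d -> DWARF regno %2d\n", n, dwarf_regno (n));
  return 0;
}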
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+{ \
+ if (flag_pic) \
+ { \
+ fprintf (FILE, "\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", \
+ LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall *mcount@GOT(%%ebx)\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tmovl $%sP%d,%%edx\n", LPREFIX, (LABELNO)); \
+ fprintf (FILE, "\tcall mcount\n"); \
+ } \
+}
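The FUNCTION_PROFILER macro above emits a two-instruction stub at each profiled function entry: the address of the per-function profiling label goes into %edx and mcount is called, either through the GOT for PIC code or directly otherwise. A minimal sketch of the text it produces, reusing the same format strings; LPREFIX is assumed here to be ".L", a value that comes from the included headers rather than from this file:

#include <stdio.h>

static void
show_profiler_stub (int flag_pic)
{
  const char *lprefix = ".L";   /* assumption; not defined in this file */
  int labelno = 0;

  if (flag_pic)
    {
      printf ("\tleal %sP%d@GOTOFF(%%ebx),%%edx\n", lprefix, labelno);
      printf ("\tcall *mcount@GOT(%%ebx)\n");
    }
  else
    {
      printf ("\tmovl $%sP%d,%%edx\n", lprefix, labelno);
      printf ("\tcall mcount\n");
    }
}

int
main (void)
{
  show_profiler_stub (1);       /* -fpic case */
  show_profiler_stub (0);       /* non-PIC case */
  return 0;
}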
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Di386 -Drtems -D__rtems__ \
+ -Asystem(rtems) -Acpu(i386) -Amachine(i386)"
+
+/* Get perform_* macros to build libgcc.a. */
+#include "i386/perform.h"
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s"
+
+#undef ENDFILE_SPEC
+
diff --git a/contrib/gcc/config/i386/sco.h b/contrib/gcc/config/i386/sco.h
index 37dc032..016e0a0 100644
--- a/contrib/gcc/config/i386/sco.h
+++ b/contrib/gcc/config/i386/sco.h
@@ -1,5 +1,5 @@
/* Definitions for Intel 386 running SCO Unix System V.
- Copyright (C) 1988, 1992, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1992, 1994, 1995, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -55,10 +55,10 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -DM_UNIX -DM_I386 -DM_COFF -DM_WORDSWAP -Asystem(unix) -Asystem(svr3) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -DM_UNIX -DM_I386 -DM_COFF -DM_WORDSWAP -Asystem(svr3)"
#undef CPP_SPEC
-#define CPP_SPEC "%{scointl:-DM_INTERNAT}"
+#define CPP_SPEC "%(cpp_cpu) %{scointl:-DM_INTERNAT}"
/* This spec is used for telling cpp whether char is signed or not. */
@@ -104,7 +104,7 @@ Boston, MA 02111-1307, USA. */
#undef RETURN_POPS_ARGS
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
- (TREE_CODE (FUNTYPE) == IDENTIFIER_NODE ? 0 \
+ ((FUNDECL) && TREE_CODE (FUNDECL) == IDENTIFIER_NODE ? 0 \
: (TARGET_RTD \
&& (TYPE_ARG_TYPES (FUNTYPE) == 0 \
|| (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
diff --git a/contrib/gcc/config/i386/sco4.h b/contrib/gcc/config/i386/sco4.h
index fc389b4..5d1ea47 100644
--- a/contrib/gcc/config/i386/sco4.h
+++ b/contrib/gcc/config/i386/sco4.h
@@ -63,11 +63,11 @@ Boston, MA 02111-1307, USA. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
- "-Asystem(unix) -Asystem(svr3) -Acpu(i386) -Amachine(i386)"
+ "-Asystem(svr3)"
#undef CPP_SPEC
-#define CPP_SPEC \
- "-D_i386 -D_M_I386 -D_M_I86 -D_M_I86SM -D_M_SDATA -D_M_STEXT \
+#define CPP_SPEC "%(cpp_cpu) \
+ -D_i386 -D_M_I386 -D_M_I86 -D_M_I86SM -D_M_SDATA -D_M_STEXT \
-D_unix -D_M_UNIX -D_M_XENIX \
-D_M_SYS5 -D_M_SYSV -D_M_SYS3 -D_M_SYSIII \
-D_M_COFF -D_M_BITFIELDS -D_M_WORDSWAP \
diff --git a/contrib/gcc/config/i386/sco4dbx.h b/contrib/gcc/config/i386/sco4dbx.h
index 0387c24..3d075b6 100644
--- a/contrib/gcc/config/i386/sco4dbx.h
+++ b/contrib/gcc/config/i386/sco4dbx.h
@@ -61,11 +61,11 @@ Boston, MA 02111-1307, USA. */
Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Di386 -Dunix -Asystem(unix) -Asystem(svr3) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -Asystem(svr3)"
#undef CPP_SPEC
-#define CPP_SPEC \
- "-D_M_I386 -D_M_I86 -D_M_I86SM -D_M_SDATA -D_M_STEXT \
+#define CPP_SPEC "%(cpp_cpu) \
+ -D_M_I386 -D_M_I86 -D_M_I86SM -D_M_SDATA -D_M_STEXT \
-D_M_UNIX -D_M_XENIX \
-D_M_SYS5 -D_M_SYSV -D_M_SYS3 -D_M_SYSIII \
-D_M_COFF -D_M_BITFIELDS -D_M_WORDSWAP \
diff --git a/contrib/gcc/config/i386/sco5.h b/contrib/gcc/config/i386/sco5.h
new file mode 100644
index 0000000..74fb891
--- /dev/null
+++ b/contrib/gcc/config/i386/sco5.h
@@ -0,0 +1,957 @@
+/* Definitions for Intel 386 running SCO Unix System V 3.2 Version 5.
+ Copyright (C) 1992, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Kean Johnston (hug@netcom.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "i386/i386.h" /* Base i386 target definitions */
+#include "i386/att.h" /* Use AT&T i386 assembler syntax */
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (i386, SCO OpenServer 5 Syntax)");
+
+#undef LPREFIX
+#define LPREFIX ".L"
+
+#undef ALIGN_ASM_OP
+#define ALIGN_ASM_OP "\t.align"
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP "\t.ascii"
+
+#undef ASM_BYTE_OP
+#define ASM_BYTE_OP "\t.byte"
+
+#undef IDENT_ASM_OP
+#define IDENT_ASM_OP "\t.ident"
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.comm"
+
+#undef SET_ASM_OP
+#define SET_ASM_OP "\t.set"
+
+#undef LOCAL_ASM_OP
+#define LOCAL_ASM_OP "\t.local"
+
+#undef INT_ASM_OP
+#define INT_ASM_OP "\t.long"
+
+#undef ASM_SHORT
+#define ASM_SHORT "\t.value"
+
+#undef ASM_LONG
+#define ASM_LONG "\t.long"
+
+#undef ASM_DOUBLE
+#define ASM_DOUBLE "\t.double"
+
+#undef TYPE_ASM_OP
+#define TYPE_ASM_OP "\t.type"
+
+#undef SIZE_ASM_OP
+#define SIZE_ASM_OP "\t.size"
+
+#undef STRING_ASM_OP
+#define STRING_ASM_OP "\t.string"
+
+#undef SKIP_ASM_OP
+#define SKIP_ASM_OP "\t.zero"
+
+#undef GLOBAL_ASM_OP
+#define GLOBAL_ASM_OP "\t.globl"
+
+#undef EH_FRAME_SECTION_ASM_OP
+#define EH_FRAME_SECTION_ASM_OP_COFF "\t.section\t.ehfram, \"x\""
+#define EH_FRAME_SECTION_ASM_OP_ELF "\t.section\t.eh_frame, \"aw\""
+#define EH_FRAME_SECTION_ASM_OP \
+ ((TARGET_ELF) ? EH_FRAME_SECTION_ASM_OP_ELF : EH_FRAME_SECTION_ASM_OP_COFF)
+
+/* Avoid problems (long section names, forward assembler refs) with DWARF
+ exception unwinding when we're generating COFF */
+#define DWARF2_UNWIND_INFO \
+ ((TARGET_ELF) ? 1 : 0 )
+
+#undef CONST_SECTION_ASM_OP
+#define CONST_SECTION_ASM_OP_COFF "\t.section\t.rodata, \"x\""
+#define CONST_SECTION_ASM_OP_ELF "\t.section\t.rodata"
+#define CONST_SECTION_ASM_OP \
+ ((TARGET_ELF) ? CONST_SECTION_ASM_OP_ELF : CONST_SECTION_ASM_OP_COFF)
+
+#undef USE_CONST_SECTION
+#define USE_CONST_SECTION_ELF 1
+#define USE_CONST_SECTION_COFF 0
+#define USE_CONST_SECTION \
+ ((TARGET_ELF) ? USE_CONST_SECTION_ELF : USE_CONST_SECTION_COFF)
+
+#undef INIT_SECTION_ASM_OP
+#define INIT_SECTION_ASM_OP_ELF "\t.section\t.init"
+#define INIT_SECTION_ASM_OP_COFF "\t.section\t.init ,\"x\""
+#define INIT_SECTION_ASM_OP \
+ ((TARGET_ELF) ? INIT_SECTION_ASM_OP_ELF : INIT_SECTION_ASM_OP_COFF)
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP_ELF "\t.section\t.ctors,\"aw\""
+#define CTORS_SECTION_ASM_OP_COFF INIT_SECTION_ASM_OP_COFF
+#define CTORS_SECTION_ASM_OP \
+ ((TARGET_ELF) ? CTORS_SECTION_ASM_OP_ELF : CTORS_SECTION_ASM_OP_COFF)
+
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP_ELF "\t.section\t.dtors, \"aw\""
+#define DTORS_SECTION_ASM_OP_COFF FINI_SECTION_ASM_OP_COFF
+#define DTORS_SECTION_ASM_OP \
+ ((TARGET_ELF) ? DTORS_SECTION_ASM_OP_ELF : DTORS_SECTION_ASM_OP_COFF)
+
+#undef FINI_SECTION_ASM_OP
+#define FINI_SECTION_ASM_OP_ELF "\t.section\t.fini"
+#define FINI_SECTION_ASM_OP_COFF "\t.section\t.fini, \"x\""
+#define FINI_SECTION_ASM_OP \
+ ((TARGET_ELF) ? FINI_SECTION_ASM_OP_ELF : FINI_SECTION_ASM_OP_COFF)
+
+#undef BSS_SECTION_ASM_OP
+#define BSS_SECTION_ASM_OP "\t.data"
+
+#undef TEXT_SECTION_ASM_OP
+#define TEXT_SECTION_ASM_OP "\t.text"
+
+#undef DATA_SECTION_ASM_OP
+#define DATA_SECTION_ASM_OP "\t.data"
+
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+
+#undef APPLY_RESULT_SIZE
+#define APPLY_RESULT_SIZE \
+(TARGET_ELF) ? size : 116
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+#define SCO_DEFAULT_ASM_COFF(FILE,NAME) \
+do { \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } while (0)
+
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ if (TARGET_ELF) { \
+ fprintf (FILE, "%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } else \
+ SCO_DEFAULT_ASM_COFF(FILE, NAME); \
+} while (0)
+
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (TARGET_ELF) { if (!flag_inhibit_size_directive) \
+ { \
+ fprintf (FILE, "%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ",.-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } } \
+ } while (0)
+
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ if (TARGET_ELF) { \
+ fprintf (FILE, "%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } else \
+ SCO_DEFAULT_ASM_COFF(FILE, NAME); \
+ } while (0)
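Under -melf the ASM_DECLARE_* macros above wrap a symbol in the usual SVR4 .type and .size bookkeeping; under -mcoff they fall back to SCO_DEFAULT_ASM_COFF, which emits only the label. A sketch of the ELF-side output for a hypothetical function foo, built from the same format strings:

#include <stdio.h>

int
main (void)
{
  const char *name = "foo";     /* hypothetical symbol */

  printf ("\t.type\t %s,@function\n", name);  /* ASM_DECLARE_FUNCTION_NAME */
  printf ("%s:\n", name);                     /* ASM_OUTPUT_LABEL          */
  /* ... the function body is emitted here ... */
  printf ("\t.size\t %s,.-%s\n", name, name); /* ASM_DECLARE_FUNCTION_SIZE */
  return 0;
}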
+
+#undef ASM_FILE_START_1
+#define ASM_FILE_START_1(FILE)
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+do { \
+ output_file_directive((FILE),main_input_filename); \
+ fprintf ((FILE), "\t.version\t\"01.01\"\n"); \
+} while (0)
+
+#undef ASM_FILE_END
+#define ASM_FILE_END(FILE) \
+do { \
+ fprintf ((FILE), "%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
+} while (0)
+
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ if (TARGET_ELF) { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ } \
+} while (0)
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+do { \
+ if (TARGET_ELF) \
+ sprintf (LABEL, "*.%s%d", (PREFIX), (NUM)); \
+ else \
+ sprintf (LABEL, ".%s%d", (PREFIX), (NUM)); \
+} while (0)
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+do { \
+ if (TARGET_ELF) \
+ fprintf (FILE, "%s _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", ASM_LONG, LPREFIX, VALUE); \
+ else \
+ fprintf (FILE, "\t.word %s%d-%s%d\n", LPREFIX,VALUE,LPREFIX,REL); \
+} while (0)
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "%s\t", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ if (TARGET_ELF) \
+ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+ else \
+ fprintf ((FILE), ",%u\n", (SIZE)); \
+} while (0)
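ASM_OUTPUT_ALIGNED_COMMON above passes an explicit alignment to .comm only when generating ELF; the COFF assembler takes just the name and size. A sketch of the line it prints for a hypothetical 256-byte symbol buf with 32-bit alignment:

#include <stdio.h>

int
main (void)
{
  int target_elf = 1;           /* pretend -melf */
  unsigned size = 256;
  unsigned align = 32, bits_per_unit = 8;

  printf ("\t.comm\tbuf");
  if (target_elf)
    printf (",%u,%u\n", size, align / bits_per_unit);
  else
    printf (",%u\n", size);
  return 0;
}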
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ if (TARGET_ELF) { \
+ fprintf ((FILE), "%s\t", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+ } else { \
+ int align = exact_log2 (ALIGN); \
+ if (align > 2) align = 2; \
+ if (TARGET_SVR3_SHLIB) \
+ data_section (); \
+ else \
+ bss_section (); \
+ ASM_OUTPUT_ALIGN ((FILE), align == -1 ? 2 : align); \
+ fprintf ((FILE), "%s\t", "\t.lcomm"); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u\n", (SIZE)); \
+ } \
+} while (0)
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
+
+#undef ESCAPES
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
+
+#undef STRING_LIMIT
+#define STRING_LIMIT ((unsigned) 256)
+
+#undef ASM_OUTPUT_LIMITED_STRING
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ do \
+ { \
+ register unsigned char *_limited_str = (unsigned char *) (STR); \
+ register unsigned ch; \
+ fprintf ((FILE), "%s\t\"", STRING_ASM_OP); \
+ for (; (ch = *_limited_str); _limited_str++) \
+ { \
+ register int escape; \
+ switch (escape = ESCAPES[ch]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ break; \
+ } \
+ } \
+ fprintf ((FILE), "\"\n"); \
+ } \
+ while (0)
+
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
+do { \
+ register unsigned char *_ascii_bytes = (unsigned char *) (STR); \
+ register unsigned char *limit = _ascii_bytes + (LENGTH); \
+ register unsigned bytes_in_chunk = 0; \
+ for (; _ascii_bytes < limit; _ascii_bytes++) \
+ { \
+ register unsigned char *p; \
+ if (bytes_in_chunk >= 64) \
+ { \
+ fputc ('\n', (FILE)); \
+ bytes_in_chunk = 0; \
+ } \
+ for (p = _ascii_bytes; p < limit && *p != '\0'; p++) \
+ continue; \
+ if (p < limit && (p - _ascii_bytes) <= STRING_LIMIT) \
+ { \
+ if (bytes_in_chunk > 0) \
+ { \
+ fputc ('\n', (FILE)); \
+ bytes_in_chunk = 0; \
+ } \
+ ASM_OUTPUT_LIMITED_STRING ((FILE), _ascii_bytes); \
+ _ascii_bytes = p; \
+ } \
+ else \
+ { \
+ if (bytes_in_chunk == 0) \
+ fprintf ((FILE), "%s\t", ASM_BYTE_OP); \
+ else \
+ fputc (',', (FILE)); \
+ fprintf ((FILE), "0x%02x", *_ascii_bytes); \
+ bytes_in_chunk += 5; \
+ } \
+ } \
+ if (bytes_in_chunk > 0) \
+ fprintf ((FILE), "\n"); \
+} while (0)
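The two macros above work as a pair: ASM_OUTPUT_ASCII scans ahead for a NUL, and a run that terminates within STRING_LIMIT bytes goes out as a single .string directive through ASM_OUTPUT_LIMITED_STRING, while anything else is dumped as comma-separated .byte values (bytes_in_chunk counts five output columns per byte, so a .byte line is broken after roughly a dozen bytes). In the ESCAPES table, an entry of 0 means the byte is printed literally, 1 means it is printed as a three-digit octal escape, and any other value is the letter printed after a backslash. A small self-contained sketch of that convention for one short string; only a few table entries are reproduced here:

#include <stdio.h>

/* Sketch of the ESCAPES convention used by ASM_OUTPUT_LIMITED_STRING:
   0 = literal, 1 = "\ooo" octal, otherwise '\\' followed by the value.  */

static int
escape_code (unsigned char ch)
{
  switch (ch)
    {
    case '\b': return 'b';
    case '\t': return 't';
    case '\n': return 'n';
    case '\f': return 'f';
    case '\r': return 'r';
    case '"':  return '"';
    case '\\': return '\\';
    default:   return (ch < 32 || ch >= 127) ? 1 : 0;
    }
}

int
main (void)
{
  const unsigned char *s = (const unsigned char *) "a\t\"quoted\"\n";

  printf ("\t.string\t\"");
  for (; *s; s++)
    {
      int e = escape_code (*s);

      if (e == 0)
        putchar (*s);
      else if (e == 1)
        printf ("\\%03o", *s);
      else
        printf ("\\%c", e);
    }
  printf ("\"\n");
  return 0;
}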
+
+/* Must use data section for relocatable constants when pic. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) \
+{ \
+ if (TARGET_ELF) { \
+ if (flag_pic && symbolic_operand (RTX)) \
+ data_section (); \
+ else \
+ const_section (); \
+ } else \
+ readonly_data_section(); \
+}
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+do { \
+ if (TARGET_ELF) \
+ ASM_OUTPUT_ALIGN ((FILE), 2); \
+ ASM_OUTPUT_INTERNAL_LABEL((FILE),(PREFIX),(NUM)); \
+} while (0)
+
+
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+do { \
+ if (TARGET_ELF) { \
+ ctors_section (); \
+ fprintf (FILE, "%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } else { \
+ init_section (); \
+ fprintf (FILE, "\tpushl $"); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); } \
+ } while (0)
+
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+do { \
+ if (TARGET_ELF) { \
+ dtors_section (); \
+ fprintf (FILE, "%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } else { \
+ fini_section (); \
+ fprintf (FILE, "%s\t ", ASM_LONG); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); } \
+ } while (0)
+
+
+#undef ASM_OUTPUT_IDENT
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+#undef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ (fprintf ((FILE), "%s ", GLOBAL_ASM_OP), assemble_name (FILE, NAME), fputs ("\n", FILE))
+
+#undef ASM_OUTPUT_EXTERNAL_LIBCALL
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ if (TARGET_ELF) ASM_GLOBALIZE_LABEL (FILE, XSTR (FUN, 0))
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/*
+ * Compensate for the difference between ELF and COFF assembler syntax.
+ * Otherwise, this is cribbed from ../svr4.h.
+ * We rename 'gcc_except_table' to the shorter name in preparation
+ * for the day when we're ready to do DWARF2 eh unwinding under COFF
+ */
+#undef ASM_OUTPUT_SECTION_NAME
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+do { \
+ static struct section_info \
+ { \
+ struct section_info *next; \
+ char *name; \
+ enum sect_enum {SECT_RW, SECT_RO, SECT_EXEC} type; \
+ } *sections; \
+ struct section_info *s; \
+ char *mode; \
+ enum sect_enum type; \
+ char *sname = NAME ; \
+ if (strcmp(NAME, ".gcc_except_table") == 0) sname = ".gccexc" ; \
+ \
+ for (s = sections; s; s = s->next) \
+ if (!strcmp (NAME, s->name)) \
+ break; \
+ \
+ if (DECL && TREE_CODE (DECL) == FUNCTION_DECL) \
+ type = SECT_EXEC, mode = (TARGET_ELF) ? "ax" : "x" ; \
+ else if (DECL && DECL_READONLY_SECTION (DECL, RELOC)) \
+ type = SECT_RO, mode = "a"; \
+ else \
+ type = SECT_RW, mode = (TARGET_ELF) ? "aw" : "w" ; \
+ \
+ if (s == 0) \
+ { \
+ s = (struct section_info *) xmalloc (sizeof (struct section_info)); \
+ s->name = xmalloc ((strlen (NAME) + 1) * sizeof (*NAME)); \
+ strcpy (s->name, NAME); \
+ s->type = type; \
+ s->next = sections; \
+ sections = s; \
+ fprintf (FILE, ".section\t%s,\"%s\"%s\n", sname, mode, \
+ (TARGET_ELF) ? ",@progbits" : "" ); \
+ } \
+ else \
+ { \
+ if (DECL && s->type != type) \
+ error_with_decl (DECL, "%s causes a section type conflict"); \
+ \
+ fprintf (FILE, ".section\t%s\n", sname); \
+ } \
+} while (0)
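ASM_OUTPUT_SECTION_NAME above remembers each named section it has announced so that the attribute string (and, for ELF, @progbits) is printed only on first use, and it renames .gcc_except_table to .gccexc to keep the name short enough for the COFF tools. A sketch of the first-use directive for a hypothetical writable data section .mydata:

#include <stdio.h>

int
main (void)
{
  int target_elf = 1;                       /* pretend -melf */
  const char *sname = ".mydata";            /* hypothetical section */
  const char *mode = target_elf ? "aw" : "w";

  printf (".section\t%s,\"%s\"%s\n", sname, mode,
          target_elf ? ",@progbits" : "");
  return 0;
}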
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+do { \
+ if (TARGET_ELF) \
+ fprintf (FILE, "%s\t%u\n", SKIP_ASM_OP, (SIZE)); \
+ else \
+ fprintf ((FILE), "%s\t.,.+%u\n", SET_ASM_OP, (SIZE)); \
+} while (0)
+
+
+#undef CTOR_LIST_BEGIN
+#define CTOR_LIST_BEGIN \
+do { \
+ asm (CTORS_SECTION_ASM_OP); \
+ if (TARGET_ELF) \
+ STATIC func_ptr __CTOR_LIST__[1] = { (func_ptr) (-1) }; \
+ else \
+ asm ("pushl $0"); \
+} while (0)
+
+#undef CTOR_LIST_END
+#define CTOR_LIST_END \
+do { \
+ if (TARGET_ELF) { \
+ asm (CTORS_SECTION_ASM_OP); \
+ STATIC func_ptr __CTOR_LIST__[1] = { (func_ptr) (0) }; \
+ } else { \
+ CTOR_LIST_BEGIN; \
+ } \
+} while (0)
+
+#undef DBX_BLOCKS_FUNCTION_RELATIVE
+#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+
+#undef DBX_FUNCTION_FIRST
+#define DBX_FUNCTION_FIRST 1
+
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(n) \
+((TARGET_ELF) ? \
+ ((n) == 0 ? 0 \
+ : (n) == 1 ? 2 \
+ : (n) == 2 ? 1 \
+ : (n) == 3 ? 3 \
+ : (n) == 4 ? 6 \
+ : (n) == 5 ? 7 \
+ : (n) == 6 ? 5 \
+ : (n) == 7 ? 4 \
+ : ((n) >= FIRST_STACK_REG && (n) <= LAST_STACK_REG) ? (n)+3 \
+ : (-1)) \
+ : \
+ ((n) == 0 ? 0 : \
+ (n) == 1 ? 2 : \
+ (n) == 2 ? 1 : \
+ (n) == 3 ? 3 : \
+ (n) == 4 ? 6 : \
+ (n) == 5 ? 7 : \
+ (n) == 6 ? 4 : \
+ (n) == 7 ? 5 : \
+ (n) + 4))
+
+#undef DWARF_DEBUGGING_INFO
+#undef SDB_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#undef PREFERRED_DEBUGGING_TYPE
+
+#define DWARF_DEBUGGING_INFO 1
+#define SDB_DEBUGGING_INFO 1
+#define DBX_DEBUGGING_INFO 1
+#define PREFERRED_DEBUGGING_TYPE \
+ ((TARGET_ELF) ? DWARF_DEBUG: SDB_DEBUG)
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_init, in_fini, in_ctors, in_dtors
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ INIT_SECTION_FUNCTION \
+ FINI_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#undef CONST_SECTION_FUNCTION
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ extern void text_section(); \
+ if (!USE_CONST_SECTION) \
+ text_section(); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+#undef FINI_SECTION_FUNCTION
+#define FINI_SECTION_FUNCTION \
+void \
+fini_section () \
+{ \
+ if ((!TARGET_ELF) && in_section != in_fini) \
+ { \
+ fprintf (asm_out_file, "%s\n", FINI_SECTION_ASM_OP); \
+ in_section = in_fini; \
+ } \
+}
+
+#undef INIT_SECTION_FUNCTION
+#define INIT_SECTION_FUNCTION \
+void \
+init_section () \
+{ \
+ if ((!TARGET_ELF) && in_section != in_init) \
+ { \
+ fprintf (asm_out_file, "%s\n", INIT_SECTION_ASM_OP); \
+ in_section = in_init; \
+ } \
+}
+
+#undef CTORS_SECTION_FUNCTION
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#undef DTORS_SECTION_FUNCTION
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#undef FRAME_POINTER_REQUIRED
+#define FRAME_POINTER_REQUIRED \
+ ((TARGET_ELF) ? 0 : \
+ (current_function_calls_setjmp || current_function_calls_longjmp))
+
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION (TARGET_ELF && flag_pic)
+
+#undef LOCAL_LABEL_PREFIX
+#define LOCAL_LABEL_PREFIX \
+ ((TARGET_ELF) ? "" : ".")
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+#define MD_EXEC_PREFIX "/usr/ccs/bin/"
+#define MD_STARTFILE_PREFIX "/usr/ccs/lib/"
+
+#undef NON_SAVING_SETJMP
+#define NON_SAVING_SETJMP \
+ ((TARGET_ELF) ? 0 : \
+ (current_function_calls_setjmp && current_function_calls_longjmp))
+
+#undef NO_IMPLICIT_EXTERN_C
+#define NO_IMPLICIT_EXTERN_C 1
+
+/* JKJ FIXME - examine the ramifications of RETURN_IN_MEMORY and
+ RETURN_POPS_ARGS */
+
+#undef RETURN_POPS_ARGS
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
+ ((TARGET_ELF) ? \
+ (i386_return_pops_args (FUNDECL, FUNTYPE, SIZE)) : \
+ (((FUNDECL) && (TREE_CODE (FUNDECL) == IDENTIFIER_NODE)) ? 0 \
+ : (TARGET_RTD \
+ && (TYPE_ARG_TYPES (FUNTYPE) == 0 \
+ || (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
+ == void_type_node))) ? (SIZE) \
+ : 0))
+
+#undef SELECT_SECTION
+#define SELECT_SECTION(DECL,RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ const_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((TARGET_ELF && flag_pic && RELOC) \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ data_section (); \
+ else \
+ const_section (); \
+ } \
+ else \
+ const_section (); \
+}
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'Y' \
+ || (CHAR) == 'z')
+
+#undef WORD_SWITCH_TAKES_ARG
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \
+ && strcmp (STR, "Tbss"))
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT 0301
+
+#undef HANDLE_SYSV_PRAGMA
+#define HANDLE_SYSV_PRAGMA 1
+
+#undef SCCS_DIRECTIVE
+#define SCCS_DIRECTIVE 1
+
+/*
+ * Define sizes and types
+ */
+#undef SIZE_TYPE
+#undef PTRDIFF_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 96
+#define SIZE_TYPE "unsigned int"
+#define PTRDIFF_TYPE "int"
+#define WCHAR_TYPE "long int"
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/*
+ * New for multilib support. Set the default switches for multilib,
+ * which is -melf.
+ */
+#define MULTILIB_DEFAULTS { "melf" }
+
+
+/* Please note that these specs may look messy but they are required in
+ order to emulate the SCO Development system as closely as possible.
+ With SCO Open Server 5.0, you now get the linker and assembler free,
+ so that is what these specs are targeted for. These utilities are
+ very argument sensitive: a space in the wrong place breaks everything.
+ So RMS, please forgive this mess. It works.
+
+ Parameters which can be passed to gcc, and their SCO equivalents:
+ GCC Parameter SCO Equivalent
+ -ansi -a ansi
+ -posix -a posix
+ -Xpg4 -a xpg4
+ -Xpg4plus -a xpg4plus
+ -Xods30 -a ods30
+
+ As with SCO, the default is XPG4 plus mode. SCO also allows you to
+ specify a C dialect with -Xt, -Xa, -Xc, -Xk and -Xm. These are passed
+ on to the assembler and linker in the same way that the SCO compiler
+ does.
+
+ SCO also allows you to compile, link and generate either ELF or COFF
+ binaries. With gcc, unlike the SCO compiler, the default is ELF.
+ Specify -mcoff to gcc to produce COFF binaries. -fpic will get the
+ assembler and linker to produce PIC code.
+*/
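As a concrete example of the table above: gcc -ansi foo.c makes the ASM_SPEC and LINK_SPEC strings below hand the ansi personality to the native assembler and linker through the -E...,ansi,... and -R...,ansi,... options they construct, while gcc -mcoff foo.c selects -b coff and those same specs reject -shared, -static, -symbolic and -fpic, which only make sense for ELF output.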
+
+/* Set up assembler flags for PIC and ELF compilations */
+#undef ASM_SPEC
+
+#if USE_GAS
+ /* Leave ASM_SPEC undefined so we pick up the master copy from gcc.c
+ * Undef MD_EXEC_PREFIX because we don't know where GAS is, but it's not
+ * likely in /usr/ccs/bin/
+ */
+#undef MD_EXEC_PREFIX
+#else
+
+#define ASM_SPEC \
+ "-b %{!mcoff:elf}%{mcoff:coff \
+ %{static:%e-static not valid with -mcoff} \
+ %{shared:%e-shared not valid with -mcoff} \
+ %{symbolic:%e-symbolic not valid with -mcoff}} \
+ %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+ %{!mcoff:-E%{Xa:a}%{!Xa:%{Xc:c}%{!Xc:%{Xk:k}%{!Xk:%{Xt:t}%{!Xt:a}}}},%{ansi:ansi}%{!ansi:%{posix:posix}%{!posix:%{Xpg4:xpg4}%{!Xpg4:%{Xpg4plus:XPG4PLUS}%{!Xpg4plus:%{Xods30:ods30}%{!Xods30:XPG4PLUS}}}}},ELF %{Qn:} %{!Qy:-Qn}}"
+#endif
+
+/* Use crt1.o as a startup file and crtn.o as a closing file. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{shared: %{!mcoff: crti.o%s}} \
+ %{!shared:\
+ %{!symbolic: \
+ %{pg:gcrt.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}} \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional: \
+ %{Xa:values-Xa.o%s} \
+ %{!Xa:%{Xc:values-Xc.o%s} \
+ %{!Xc:%{Xk:values-Xk.o%s} \
+ %{!Xk:%{Xt:values-Xt.o%s} \
+ %{!Xt:values-Xa.o%s}}}}}} \
+ %{mcoff:crtbeginS.o%s} %{!mcoff:crtbegin.o%s}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!mcoff:crtend.o%s} \
+ %{mcoff:crtendS.o%s} \
+ %{pg:gcrtn.o%s}%{!pg:crtn.o%s}"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-Asystem(svr3)"
+
+/* You are in a maze of GCC specs ... all alike */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) \
+ %{fpic:%{mcoff:%e-fpic is not valid with -mcoff}} \
+ %{fPIC:%{mcoff:%e-fPIC is not valid with -mcoff}} \
+ -D__i386 -D__unix -D_SCO_DS=1 -D_M_I386 -D_M_XENIX -D_M_UNIX \
+ %{!Xods30:-D_STRICT_NAMES} \
+ %{!ansi:%{!posix:%{!Xods30:-D_SCO_XPG_VERS=4}}} \
+ %{ansi:-isystem include/ansi%s -isystem /usr/include/ansi -D_STRICT_ANSI} \
+ %{!ansi: \
+ %{posix:-isystem include/posix%s -isystem /usr/include/posix \
+ -D_POSIX_C_SOURCE=2 -D_POSIX_SOURCE=1} \
+ %{!posix:%{Xpg4:-isystem include/xpg4%s -isystem /usr/include/xpg4 \
+ -D_XOPEN_SOURCE=1} \
+ %{!Xpg4:-D_M_I86 -D_M_I86SM -D_M_INTERNAT -D_M_SDATA -D_M_STEXT \
+ -D_M_BITFIELDS -D_M_SYS5 -D_M_SYSV -D_M_SYSIII \
+ -D_M_WORDSWAP -Dunix -DM_I386 -DM_UNIX -DM_XENIX \
+ %{Xods30:-isystem include/ods_30_compat%s \
+ -isystem /usr/include/ods_30_compat \
+ -D_SCO_ODS_30 -DM_I86 -DM_I86SM -DM_SDATA -DM_STEXT \
+ -DM_BITFIELDS -DM_SYS5 -DM_SYSV -DM_INTERNAT -DM_SYSIII \
+ -DM_WORDSWAP}}}} \
+ %{scointl:-DM_INTERNAT -D_M_INTERNAT} \
+ %{traditional:-D_KR -D_SVID -D_NO_PROTOTYPE} \
+ %{!mcoff:-D_SCO_ELF} \
+ %{mcoff:-D_M_COFF -D_SCO_COFF} \
+ %{!mcoff:%{fpic:-D__PIC__ -D__pic__} \
+ %{fPIC:%{!fpic:-D__PIC__ -D__pic__}}} \
+ %{Xa:-D_SCO_C_DIALECT=1} \
+ %{!Xa:%{Xc:-D_SCO_C_DIALECT=3} \
+ %{!Xc:%{Xk:-D_SCO_C_DIALECT=4} \
+ %{!Xk:%{Xt:-D_SCO_C_DIALECT=2} \
+ %{!Xt:-D_SCO_C_DIALECT=1}}}} \
+ %{traditional:-traditional -D_KR -D_NO_PROTOTYPE}"
+
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "-b %{!mcoff:elf}%{mcoff:coff \
+ %{static:%e-static not valid with -mcoff} \
+ %{shared:%e-shared not valid with -mcoff} \
+ %{symbolic:%e-symbolic not valid with -mcoff} \
+ %{fpic:%e-fpic not valid with -mcoff} \
+ %{fPIC:%e-fPIC not valid with -mcoff}} \
+ -R%{Xa:a}%{!Xa:%{Xc:c}%{!Xc:%{Xk:k}%{!Xk:%{Xt:t}%{!Xt:a}}}},%{ansi:ansi}%{!ansi:%{posix:posix}%{!posix:%{Xpg4:xpg4}%{!Xpg4:%{Xpg4plus:XPG4PLUS}%{!Xpg4plus:%{Xods30:ods30}%{!Xods30:XPG4PLUS}}}}},%{mcoff:COFF}%{!mcoff:ELF} \
+ %{Wl,*%*} %{YP,*} %{YL,*} %{YU,*} \
+ %{!YP,*:%{p:-YP,/usr/ccs/libp:/lib/libp:/usr/lib/libp:/usr/ccs/lib:/lib:/usr/lib} \
+ %{!p:-YP,/usr/ccs/lib:/lib:/usr/lib}} \
+ %{h*} %{static:-dn -Bstatic} %{shared:-G -dy %{!z*:-z text}} \
+ %{symbolic:-Bsymbolic -G -dy %{!z*:-z text}} %{z*} %{R*} %{Y*} \
+ %{G:-G} %{!mcoff:%{Qn:} %{!Qy:-Qn}}"
+
+/* The SCO COFF linker gets confused on the difference between "-ofoo"
+ and "-o foo". So we just always force a single space. */
+
+#define SWITCHES_NEED_SPACES "o"
+
+/* Library spec. If we are not building a shared library, provide the
+ standard libraries, as per the SCO compiler. */
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{shared:pic/libgcc.a%s}%{!shared:%{!symbolic:-lcrypt -lgen -lc}}"
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC \
+ "%{!shared:-lgcc}"
+
+#define MASK_COFF 010000000000 /* Mask for elf generation */
+#define TARGET_COFF (target_flags & MASK_COFF)
+#define TARGET_ELF (!(target_flags & MASK_COFF))
+
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+ { "coff", MASK_COFF }, \
+ { "elf", -MASK_COFF },
+
+#define NO_DOLLAR_IN_LABEL
+
+/*
+Here comes some major hackery to get the crt stuff to compile properly.
+Since we can (and do) compile for both COFF and ELF environments, we
+set things up accordingly, based on the pre-processor defines for ELF
+and COFF. This is insane, but then I guess having one compiler with a
+single back-end supporting two vastly different file format types is
+a little insane too. But it is not impossible and we get a useful
+compiler at the end of the day. Onward we go ...
+*/
+
+#if defined(CRT_BEGIN) || defined(CRT_END) || defined(IN_LIBGCC2)
+# undef OBJECT_FORMAT_ELF
+# undef HAVE_ATEXIT
+# undef INIT_SECTION_ASM_OP
+# undef FINI_SECTION_ASM_OP
+# undef CTORS_SECTION_ASM_OP
+# undef DTORS_SECTION_ASM_OP
+# undef EH_FRAME_SECTION_ASM_OP
+# undef CTOR_LIST_BEGIN
+# undef CTOR_LIST_END
+# undef DO_GLOBAL_CTORS_BODY
+
+# if defined (_SCO_ELF)
+# define OBJECT_FORMAT_ELF
+# define HAVE_ATEXIT 1
+# define INIT_SECTION_ASM_OP INIT_SECTION_ASM_OP_ELF
+# define FINI_SECTION_ASM_OP FINI_SECTION_ASM_OP_ELF
+# define DTORS_SECTION_ASM_OP DTORS_SECTION_ASM_OP_ELF
+# define CTORS_SECTION_ASM_OP CTORS_SECTION_ASM_OP_ELF
+# define EH_FRAME_SECTION_ASM_OP EH_FRAME_SECTION_ASM_OP_ELF
+# else /* ! _SCO_ELF */
+# define INIT_SECTION_ASM_OP INIT_SECTION_ASM_OP_COFF
+# define FINI_SECTION_ASM_OP FINI_SECTION_ASM_OP_COFF
+# define DTORS_SECTION_ASM_OP DTORS_SECTION_ASM_OP_COFF
+# define CTORS_SECTION_ASM_OP CTORS_SECTION_ASM_OP_COFF
+# define EH_FRAME_SECTION_ASM_OP ""
+# define CTOR_LIST_BEGIN asm (INIT_SECTION_ASM_OP); asm ("pushl $0")
+# define CTOR_LIST_END CTOR_LIST_BEGIN
+# define DO_GLOBAL_CTORS_BODY \
+do { \
+ func_ptr *p, *beg = alloca(0); \
+ for (p = beg; *p;) \
+ (*p++) (); \
+} while (0)
+# endif /* ! _SCO_ELF */
+#endif /* CRT_BEGIN !! CRT_END */
diff --git a/contrib/gcc/config/i386/sco5gas.h b/contrib/gcc/config/i386/sco5gas.h
new file mode 100644
index 0000000..de3e5d5
--- /dev/null
+++ b/contrib/gcc/config/i386/sco5gas.h
@@ -0,0 +1,24 @@
+/* Definitions for Intel x86 running SCO OpenServer, running GNU assembler
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* Just set a single flag we can test for inside of sco5.h and include it. */
+
+#define USE_GAS 1
diff --git a/contrib/gcc/config/i386/scodbx.h b/contrib/gcc/config/i386/scodbx.h
index 1309735..a2581d4 100644
--- a/contrib/gcc/config/i386/scodbx.h
+++ b/contrib/gcc/config/i386/scodbx.h
@@ -1,6 +1,6 @@
/* Definitions for Intel 386 running SCO Unix System V,
using dbx-in-coff encapsulation.
- Copyright (C) 1992, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1995, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -50,10 +50,10 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
#undef CPP_PREDEFINES
-#define CPP_PREDEFINES "-Dunix -Di386 -DM_UNIX -DM_I386 -DM_COFF -DM_WORDSWAP -Asystem(unix) -Asystem(svr3) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -DM_UNIX -DM_I386 -DM_COFF -DM_WORDSWAP -Asystem(svr3)"
#undef CPP_SPEC
-#define CPP_SPEC "%{scointl:-DM_INTERNAT}"
+#define CPP_SPEC " -Acpu(i386) -Amachine(i386) %{scointl:-DM_INTERNAT}"
/* This spec is used for telling cpp whether char is signed or not. */
@@ -75,7 +75,7 @@ Boston, MA 02111-1307, USA. */
#undef RETURN_POPS_ARGS
#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) \
- (TREE_CODE (FUNTYPE) == IDENTIFIER_NODE ? 0 \
+ ((FUNDECL) && TREE_CODE (FUNDECL) == IDENTIFIER_NODE ? 0 \
: (TARGET_RTD \
&& (TYPE_ARG_TYPES (FUNTYPE) == 0 \
|| (TREE_VALUE (tree_last (TYPE_ARG_TYPES (FUNTYPE))) \
diff --git a/contrib/gcc/config/i386/seq-gas.h b/contrib/gcc/config/i386/seq-gas.h
index 2ee0719..796eaa2 100644
--- a/contrib/gcc/config/i386/seq-gas.h
+++ b/contrib/gcc/config/i386/seq-gas.h
@@ -39,8 +39,8 @@
GAS requires the %cl argument, so override i386/unix.h. */
-#undef AS3_SHIFT_DOUBLE
-#define AS3_SHIFT_DOUBLE(a,b,c,d) AS3 (a,b,c,d)
+#undef SHIFT_DOUBLE_OMITS_COUNT
+#define SHIFT_DOUBLE_OMITS_COUNT 0
/* Print opcodes the way that GAS expects them. */
#define GAS_MNEMONICS 1
diff --git a/contrib/gcc/config/i386/seq-sysv3.h b/contrib/gcc/config/i386/seq-sysv3.h
index e3182ee..9e8388d 100644
--- a/contrib/gcc/config/i386/seq-sysv3.h
+++ b/contrib/gcc/config/i386/seq-sysv3.h
@@ -40,17 +40,3 @@
/* Assembler pseudo-op for uninitialized shared local variables (.shbss). */
#undef SHARED_BSS_SECTION_ASM_OP
#define SHARED_BSS_SECTION_ASM_OP ".section .shbss, \"bs\""
-#undef BSS_SECTION_FUNCTION
-#define BSS_SECTION_FUNCTION \
-void \
-bss_section () \
-{ \
- if (in_section != in_bss) \
- { \
- if (flag_shared_data) \
- fprintf (asm_out_file, "%s\n", SHARED_BSS_SECTION_ASM_OP); \
- else \
- fprintf (asm_out_file, "%s\n", BSS_SECTION_ASM_OP); \
- in_section = in_bss; \
- } \
-}
diff --git a/contrib/gcc/config/i386/sol2-c1.asm b/contrib/gcc/config/i386/sol2-c1.asm
index 72fdfb8..d08bcbd 100644
--- a/contrib/gcc/config/i386/sol2-c1.asm
+++ b/contrib/gcc/config/i386/sol2-c1.asm
@@ -1,6 +1,6 @@
! crt1.s for Solaris 2, x86
-! Copyright (C) 1993 Free Software Foundation, Inc.
+! Copyright (C) 1993, 1998 Free Software Foundation, Inc.
! Written By Fred Fish, Nov 1992
!
! This file is free software; you can redistribute it and/or modify it
@@ -149,7 +149,7 @@ _start:
! A dummy profiling support routine for non-profiling executables,
! in case we link in some objects that have been compiled for profiling.
- .globl _mcount
+ .weak _mcount
_mcount:
ret
.type _mcount,@function
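Making the dummy _mcount weak rather than global lets a real profiling _mcount supplied by a library or another object file take precedence without a multiple-definition error, so the same startup object serves both profiled and unprofiled links.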
diff --git a/contrib/gcc/config/i386/sol2-gc1.asm b/contrib/gcc/config/i386/sol2-gc1.asm
new file mode 100644
index 0000000..24a1965
--- /dev/null
+++ b/contrib/gcc/config/i386/sol2-gc1.asm
@@ -0,0 +1,160 @@
+! gcrt1.s for Solaris 2, x86
+
+! Copyright (C) 1993 Free Software Foundation, Inc.
+! Written By Fred Fish, Nov 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file takes control of the process from the kernel, as specified
+! in section 3 of the System V Application Binary Interface, Intel386
+! Processor Supplement. It has been constructed from information obtained
+! from the ABI, information obtained from single stepping existing
+! Solaris executables through their startup code with gdb, and from
+! information obtained by single stepping executables on other i386 SVR4
+! implementations. This file is the first thing linked into any executable.
+
+! This is a modified crt1.s by J.W.Hawtin <oolon@ankh.org> 15/8/96,
+! to allow program profiling, by calling monstartup on entry and _mcleanup
+! on exit
+
+ .file "gcrt1.s"
+ .ident "GNU C gcrt1.s"
+ .weak _DYNAMIC
+ .text
+
+! Start creating the initial frame by pushing a NULL value for the return
+! address of the initial frame, and mark the end of the stack frame chain
+! (the innermost stack frame) with a NULL value, per page 3-32 of the ABI.
+! Initialize the first stack frame pointer in %ebp (the contents of which
+! are unspecified at process initialization).
+
+ .globl _start
+_start:
+ pushl $0x0
+ pushl $0x0
+ movl %esp,%ebp
+
+! As specified per page 3-32 of the ABI, %edx contains a function
+! pointer that should be registered with atexit(), for proper
+! shared object termination. Just push it onto the stack for now
+! to preserve it. We want to register _cleanup() first.
+
+ pushl %edx
+
+! Check to see if there is a _mcleanup() function linked in, and if
+! so, register it with atexit() as the last thing to be run by
+! atexit().
+
+ movl $_mcleanup,%eax
+ testl %eax,%eax
+ je .L1
+ pushl $_mcleanup
+ call atexit
+ addl $0x4,%esp
+.L1:
+
+! Now check to see if we have an _DYNAMIC table, and if so then
+! we need to register the function pointer previously in %edx, but
+! now conveniently saved on the stack as the argument to pass to
+! atexit().
+
+ movl $_DYNAMIC,%eax
+ testl %eax,%eax
+ je .L2
+ call atexit
+.L2:
+
+! Register _fini() with atexit(). We will take care of calling _init()
+! directly.
+
+ pushl $_fini
+ call atexit
+
+! Start profiling
+
+ pushl %ebp
+ movl %esp,%ebp
+ pushl $_etext
+ pushl $_start
+ call monstartup
+ addl $8,%esp
+ popl %ebp
+
+! Compute the address of the environment vector on the stack and load
+! it into the global variable _environ. Currently argc is at 8 off
+! the frame pointer. Fetch the argument count into %eax, scale by the
+! size of each arg (4 bytes) and compute the address of the environment
+! vector which is 16 bytes (the two zero words we pushed, plus argc,
+! plus the null word terminating the arg vector) further up the stack,
+! off the frame pointer (whew!).
+
+ movl 8(%ebp),%eax
+ leal 16(%ebp,%eax,4),%edx
+ movl %edx,_environ
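Put another way, with argc at 8(%ebp) and argv beginning at 12(%ebp), the leal computes _environ = %ebp + 16 + 4*argc: twelve bytes cover the two pushed zero words plus argc, 4*argc covers the argument pointers, and the remaining four bytes are the NULL word that terminates argv.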
+
+! Push the environment vector pointer, the argument vector pointer,
+! and the argument count on to the stack to set up the arguments
+! for _init(), _fpstart(), and main(). Note that the environment
+! vector pointer and the arg count were previously loaded into
+! %edx and %eax respectively. The only new value we need to compute
+! is the argument vector pointer, which is at a fixed address off
+! the initial frame pointer.
+
+ pushl %edx
+ leal 12(%ebp),%edx
+ pushl %edx
+ pushl %eax
+
+! Call _init(argc, argv, environ), _fpstart(argc, argv, environ), and
+! main(argc, argv, environ).
+
+ call _init
+ call __fpstart
+ call main
+
+! Pop the argc, argv, and environ arguments off the stack, push the
+! value returned from main(), and call exit().
+
+ addl $12,%esp
+ pushl %eax
+ call exit
+
+! An inline equivalent of _exit, as specified in Figure 3-26 of the ABI.
+
+ pushl $0x0
+ movl $0x1,%eax
+ lcall $7,$0
+
+! If all else fails, just try a halt!
+
+ hlt
+ .type _start,@function
+ .size _start,.-_start
diff --git a/contrib/gcc/config/i386/sol2.h b/contrib/gcc/config/i386/sol2.h
index cc5ebca..8fc3e61 100644
--- a/contrib/gcc/config/i386/sol2.h
+++ b/contrib/gcc/config/i386/sol2.h
@@ -1,7 +1,6 @@
/* Target definitions for GNU compiler for Intel 80386 running Solaris 2
- Copyright (C) 1993, 1995 Free Software Foundation, Inc.
-
- Written by Fred Fish (fnf@cygnus.com).
+ Copyright (C) 1993, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Fred Fish (fnf@cygnus.com).
This file is part of GNU CC.
@@ -31,17 +30,16 @@ Boston, MA 02111-1307, USA. */
executed. This macro forces the assembler to do the padding, since
it knows what it is doing. */
-#define FORCE_INIT_SECTION_ALIGN do { asm (ALIGN_ASM_OP ## " 16"); } while (0)
+#define FORCE_INIT_SECTION_ALIGN asm (ALIGN_ASM_OP ## " 16")
#define FORCE_FINI_SECTION_ALIGN FORCE_INIT_SECTION_ALIGN
/* Add "sun" to the list of symbols defined for SVR4. */
#undef CPP_PREDEFINES
#define CPP_PREDEFINES \
- "-Di386 -Dunix -D__svr4__ -D__SVR4 -Dsun \
- -Asystem(unix) -Asystem(svr4) -Acpu(i386) -Amachine(i386)"
+ "-Dunix -D__svr4__ -D__SVR4 -Dsun -Asystem(svr4)"
#undef CPP_SPEC
-#define CPP_SPEC "\
+#define CPP_SPEC "%(cpp_cpu) \
%{compat-bsd:-iwithprefixbefore ucbinclude -I/usr/ucbinclude}"
#undef LIB_SPEC
@@ -51,10 +49,21 @@ Boston, MA 02111-1307, USA. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC "crtend.o%s %{pg:crtn.o%s}%{!pg:crtn.o%s}"
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}}\
+ %{pg:gmon.o%s} crti.o%s \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}} \
+ crtbegin.o%s"
+
/* This should be the same as in svr4.h, except with -R added. */
#undef LINK_SPEC
#define LINK_SPEC \
- "%{h*} %{V} %{v:%{!V:-V}} \
+ "%{h*} %{v:-V} \
%{b} %{Wl,*:%*} \
%{static:-dn -Bstatic} \
%{shared:-G -dy -z text} \
@@ -63,12 +72,14 @@ Boston, MA 02111-1307, USA. */
%{YP,*} \
%{R*} \
%{compat-bsd: \
- %{!YP,*:%{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
- %{!p:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}} \
+ %{!YP,*:%{pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!pg:%{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
-R /usr/ucblib} \
%{!compat-bsd: \
- %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
- %{!p:-Y P,/usr/ccs/lib:/usr/lib}}} \
+ %{!YP,*:%{pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!pg:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:-Y P,/usr/ccs/lib:/usr/lib}}}} \
%{Qy:} %{!Qn:-Qy}"
/* This defines which switch letters take arguments.
@@ -76,16 +87,9 @@ Boston, MA 02111-1307, USA. */
#undef SWITCH_TAKES_ARG
#define SWITCH_TAKES_ARG(CHAR) \
- ( (CHAR) == 'D' \
- || (CHAR) == 'U' \
- || (CHAR) == 'o' \
- || (CHAR) == 'e' \
- || (CHAR) == 'u' \
- || (CHAR) == 'I' \
- || (CHAR) == 'm' \
- || (CHAR) == 'L' \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
|| (CHAR) == 'R' \
- || (CHAR) == 'A' \
|| (CHAR) == 'h' \
|| (CHAR) == 'z')
+#define STDC_0_IN_SYSTEM_HEADERS
diff --git a/contrib/gcc/config/i386/sun386.h b/contrib/gcc/config/i386/sun386.h
index 6e26807..4d4d66c 100644
--- a/contrib/gcc/config/i386/sun386.h
+++ b/contrib/gcc/config/i386/sun386.h
@@ -1,5 +1,5 @@
/* Definitions for Sun assembler syntax for the Intel 80386.
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -131,10 +131,9 @@ do \
#define ASM_GENERATE_INTERNAL_LABEL(BUF,PREFIX,NUMBER) \
sprintf ((BUF), "*.%s%d", (PREFIX), (NUMBER))
-/* This is how to output a reference to a user-level label named NAME. */
+/* The prefix to add to user-visible assembler symbols. */
-#define ASM_OUTPUT_LABELREF(FILE,NAME) \
- fprintf (FILE, "%s", NAME)
+#define USER_LABEL_PREFIX ""
/* This is how to output an internal numbered label where
PREFIX is the class of label and NUM is the number within the class. */
diff --git a/contrib/gcc/config/i386/svr3.ifile b/contrib/gcc/config/i386/svr3.ifile
index f0bb3a0..32b3ddc 100644
--- a/contrib/gcc/config/i386/svr3.ifile
+++ b/contrib/gcc/config/i386/svr3.ifile
@@ -28,7 +28,10 @@ SECTIONS
vfork = fork; /* I got tired of editing peoples sloppy code */
*(.fini)
}
- GROUP BIND( NEXT(0x400000) + (ADDR(.text) + (SIZEOF(.text)) % 0x1000)):
+ .stab BIND(ADDR(.text) + SIZEOF(.text)): { }
+ .stabstr BIND(ADDR(.stab) + SIZEOF(.stab)): { }
+ GROUP BIND( NEXT(0x400000) +
+ (ADDR(.stabstr) + (SIZEOF(.stabstr)) % 0x1000)):
{
.data : {
__CTOR_LIST__ = . ;
diff --git a/contrib/gcc/config/i386/svr3dbx.h b/contrib/gcc/config/i386/svr3dbx.h
index d3348d5..36c01cc 100644
--- a/contrib/gcc/config/i386/svr3dbx.h
+++ b/contrib/gcc/config/i386/svr3dbx.h
@@ -46,16 +46,13 @@ Boston, MA 02111-1307, USA. */
/* Align labels, etc. at 4-byte boundaries.
For the 486, align to 16-byte boundary for sake of cache. */
-#undef ASM_OUTPUT_ALIGN_CODE
-#define ASM_OUTPUT_ALIGN_CODE(FILE) \
- fprintf ((FILE), "\t.align %d,0x90\n", \
- 1 << i386_align_jumps)
+#undef LABEL_ALIGN_AFTER_BARRIER
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) (i386_align_jumps)
/* Align start of loop at 4-byte boundary. */
-#undef ASM_OUTPUT_LOOP_ALIGN
-#define ASM_OUTPUT_LOOP_ALIGN(FILE) \
- fprintf ((FILE), "\t.align %d,0x90\n", 1 << i386_align_loops);
+#undef LOOP_ALIGN
+#define LOOP_ALIGN(LABEL) (i386_align_loops)
/* Additional overrides needed for dbx-in-coff gas, mostly taken from pbb.h */
@@ -66,17 +63,6 @@ Boston, MA 02111-1307, USA. */
*/
#define CTOR_LISTS_DEFINED_EXTERNALLY
-/* similar to default, but allows for the table defined by ld with svr3.ifile.
- nptrs is always 0. So we need to instead check that __DTOR_LIST__[1] != 0.
- The old check is left in so that the same macro can be used if and when
- a future version of gas does support section directives. */
-
-#define DO_GLOBAL_DTORS_BODY {int nptrs = *(int *)__DTOR_LIST__; int i; \
- if (nptrs == -1 || (__DTOR_LIST__[0] == 0 && __DTOR_LIST__[1] != 0)) \
- for (nptrs = 0; __DTOR_LIST__[nptrs + 1] != 0; nptrs++); \
- for (i = nptrs; i >= 1; i--) \
- __DTOR_LIST__[i] (); }
-
/* Use crt1.o as a startup file and crtn.o as a closing file. */
/*
* The loader directive file svr3.ifile defines how to merge the constructor
diff --git a/contrib/gcc/config/i386/svr3gas.h b/contrib/gcc/config/i386/svr3gas.h
index 401c766..a288b84 100644
--- a/contrib/gcc/config/i386/svr3gas.h
+++ b/contrib/gcc/config/i386/svr3gas.h
@@ -1,5 +1,5 @@
/* Definitions for Intel 386 running system V, using gas.
- Copyright (C) 1992 Free Software Foundation, Inc.
+ Copyright (C) 1992, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -139,29 +139,17 @@ do { \
#endif /* STACK_GROWS_DOWNWARD */
-/* Add extra sections .init and .fini, in addition to .bss from att386.h. */
+/* Add extra sections .rodata, .init and .fini. */
#undef EXTRA_SECTIONS
-#define EXTRA_SECTIONS in_const, in_bss, in_init, in_fini
+#define EXTRA_SECTIONS in_const, in_init, in_fini
#undef EXTRA_SECTION_FUNCTIONS
#define EXTRA_SECTION_FUNCTIONS \
CONST_SECTION_FUNCTION \
- BSS_SECTION_FUNCTION \
INIT_SECTION_FUNCTION \
FINI_SECTION_FUNCTION
-#define BSS_SECTION_FUNCTION \
-void \
-bss_section () \
-{ \
- if (in_section != in_bss) \
- { \
- fprintf (asm_out_file, "\t%s\n", BSS_SECTION_ASM_OP); \
- in_section = in_bss; \
- } \
-}
-
#define INIT_SECTION_FUNCTION \
void \
init_section () \
diff --git a/contrib/gcc/config/i386/svr3z.ifile b/contrib/gcc/config/i386/svr3z.ifile
index 4fdbb93..4946051 100644
--- a/contrib/gcc/config/i386/svr3z.ifile
+++ b/contrib/gcc/config/i386/svr3z.ifile
@@ -28,7 +28,10 @@ SECTIONS
vfork = fork; /* I got tired of editing peoples sloppy code */
*(.fini)
}
- GROUP BIND( NEXT(0x400000) + (ADDR(.text) + (SIZEOF(.text)) % 0x1000)):
+ .stab BIND(ADDR(.text) + SIZEOF(.text)): { }
+ .stabstr BIND(ADDR(.stab) + SIZEOF(.stab)): { }
+ GROUP BIND( NEXT(0x400000) +
+ (ADDR(.stabstr) + (SIZEOF(.stabstr)) % 0x1000)):
{
.data : {
__CTOR_LIST__ = . ;
diff --git a/contrib/gcc/config/i386/sysv3.h b/contrib/gcc/config/i386/sysv3.h
index 8c5cfc4..ce89889 100644
--- a/contrib/gcc/config/i386/sysv3.h
+++ b/contrib/gcc/config/i386/sysv3.h
@@ -1,5 +1,5 @@
/* Definitions for Intel 386 running system V.
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -26,7 +26,7 @@ Boston, MA 02111-1307, USA. */
#include "svr3.h"
/* Use the ATT assembler syntax.
- This overrides at least one macro (ASM_OUTPUT_LABELREF) from svr3.h. */
+ This overrides at least one macro (USER_LABEL_PREFIX) from svr3.h. */
#include "i386/att.h"
@@ -42,9 +42,9 @@ Boston, MA 02111-1307, USA. */
/* Specify predefined symbols in preprocessor. */
-#define CPP_PREDEFINES "-Dunix -Di386 -Asystem(unix) -Asystem(svr3) -Acpu(i386) -Amachine(i386)"
+#define CPP_PREDEFINES "-Dunix -Asystem(svr3)"
-#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}"
+#define CPP_SPEC "%(cpp_cpu) %{posix:-D_POSIX_SOURCE}"
/* Writing `int' for a bitfield forces int alignment for the structure. */
diff --git a/contrib/gcc/config/i386/sysv4.h b/contrib/gcc/config/i386/sysv4.h
index 92fcada..e688f7b 100644
--- a/contrib/gcc/config/i386/sysv4.h
+++ b/contrib/gcc/config/i386/sysv4.h
@@ -58,7 +58,7 @@ do { long value; \
/* This is how to output assembly code to define a `double' constant.
We always have to use a pair of .long pseudo-ops to do this because
the native SVR4 ELF assembler is buggy and it generates incorrect
- values when we try to use the the .double pseudo-op instead. */
+ values when we try to use the .double pseudo-op instead. */
#undef ASM_OUTPUT_DOUBLE
#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
@@ -236,10 +236,18 @@ do { long value[3]; \
i386.md for an explanation of the expression this outputs. */
#undef ASM_OUTPUT_ADDR_DIFF_ELT
-#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, VALUE, REL) \
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
fprintf (FILE, "\t.long _GLOBAL_OFFSET_TABLE_+[.-%s%d]\n", LPREFIX, VALUE)
/* Indicate that jump tables go in the text section. This is
necessary when compiling PIC code. */
-#define JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION (flag_pic)
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
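
Editorial aside: asm_output_aligned_bss is provided by the compiler proper; as a rough, hedged illustration of what such a routine emits (directive spellings assumed, ALIGN treated as a byte count as the comment above states), it switches to the bss section, aligns, and reserves SIZE bytes under the label:

#include <stdio.h>

/* Hypothetical stand-in, not the real helper.  */
static void
output_aligned_bss_demo (FILE *file, const char *name, int size, int align)
{
  fprintf (file, "\t.section .bss\n");
  fprintf (file, "\t.balign %d\n", align);
  fprintf (file, "%s:\n", name);
  fprintf (file, "\t.skip %d\n", size);
}

int
main (void)
{
  output_aligned_bss_demo (stdout, "buffer", 1024, 4);
  return 0;
}
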
diff --git a/contrib/gcc/config/i386/t-crtpic b/contrib/gcc/config/i386/t-crtpic
index f5dd073..ff81a9be 100644
--- a/contrib/gcc/config/i386/t-crtpic
+++ b/contrib/gcc/config/i386/t-crtpic
@@ -7,3 +7,4 @@
# routines in crtstuff.c.
CRTSTUFF_T_CFLAGS = -fPIC -fno-omit-frame-pointer
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/i386/t-dgux b/contrib/gcc/config/i386/t-dgux
new file mode 100644
index 0000000..292331f
--- /dev/null
+++ b/contrib/gcc/config/i386/t-dgux
@@ -0,0 +1,4 @@
+#
+# target makefile for dgux
+#
+EXTRA_PARTS=crtbegin.o crtend.o
diff --git a/contrib/gcc/config/i386/t-mingw32 b/contrib/gcc/config/i386/t-mingw32
new file mode 100644
index 0000000..fe948c6
--- /dev/null
+++ b/contrib/gcc/config/i386/t-mingw32
@@ -0,0 +1,4 @@
+#
+# collect2 doesn't work for i386-mingw32* yet.
+#
+USE_COLLECT2=
diff --git a/contrib/gcc/config/i386/t-next b/contrib/gcc/config/i386/t-next
index ec6373f..effa695 100644
--- a/contrib/gcc/config/i386/t-next
+++ b/contrib/gcc/config/i386/t-next
@@ -7,3 +7,6 @@ OTHER_FIXINCLUDES_DIRS= /LocalDeveloper/Headers
# <limits.h> is sometimes in /usr/include/ansi/limits.h.
LIMITS_H_TEST = [ -f $(SYSTEM_HEADER_DIR)/limits.h -o -f $(SYSTEM_HEADER_DIR)/ansi/limits.h ]
+
+nextstep.o: $(srcdir)/config/nextstep.c $(CONFIG_H) flags.h tree.h
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/nextstep.c
diff --git a/contrib/gcc/config/i386/t-osf b/contrib/gcc/config/i386/t-osf
new file mode 100644
index 0000000..c996e0c
--- /dev/null
+++ b/contrib/gcc/config/i386/t-osf
@@ -0,0 +1,2 @@
+# If compiling with the osf gcc, avoid sharing code.
+TCFLAGS = -pic-none
diff --git a/contrib/gcc/config/i386/t-osf1elf b/contrib/gcc/config/i386/t-osf1elf
new file mode 100644
index 0000000..77c7df1
--- /dev/null
+++ b/contrib/gcc/config/i386/t-osf1elf
@@ -0,0 +1,18 @@
+# Assemble startup files.
+crti.o: $(srcdir)/config/i386/osf1-ci.asm $(GCC_PASSES)
+ sed -e '/^!/d' <$(srcdir)/config/i386/osf1-ci.asm >crti.s
+ $(GCC_FOR_TARGET) -c -o crti.o crti.s
+crtn.o: $(srcdir)/config/i386/osf1-cn.asm $(GCC_PASSES)
+ sed -e '/^!/d' <$(srcdir)/config/i386/osf1-cn.asm >crtn.s
+ $(GCC_FOR_TARGET) -c -o crtn.o crtn.s
+
+# The pushl in CTOR initialization interferes with frame pointer elimination.
+
+# We need to use -fPIC when we are using gcc to compile the routines in
+# crtstuff.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fPIC when compiling the
+# routines in crtstuff.c.
+
+CRTSTUFF_T_CFLAGS = -fPIC -fno-omit-frame-pointer
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/i386/t-sco5 b/contrib/gcc/config/i386/t-sco5
new file mode 100644
index 0000000..f602066
--- /dev/null
+++ b/contrib/gcc/config/i386/t-sco5
@@ -0,0 +1,20 @@
+# The pushl in CTOR initialization interferes with frame pointer elimination.
+CRTSTUFF_T_CFLAGS = -fPIC -fno-omit-frame-pointer
+CRTSTUFF_T_CFLAGS_S = -mcoff -fno-omit-frame-pointer
+
+#
+# I am still a little unsure of the multilib architecture. The following
+# 4 lines are based on advice from meissner@cygnus.com.
+#
+MULTILIB_OPTIONS = mcoff/fPIC
+MULTILIB_DIRNAMES = coff pic
+MULTILIB_EXCEPTIONS = *mcoff*/*fPIC*
+MULTILIB_MATCHES = fPIC=fpic
+MULTILIB_EXTRA_OPTS =
+
+LIBGCC=stmp-multilib
+INSTALL_LIBGCC=install-multilib
+
+crti.o: $(srcdir)/config/i386/sol2-ci.asm $(GCC_PASSES)
+ sed -e '/^!/d' <$(srcdir)/config/i386/sol2-ci.asm >crti.s
+ $(GCC_FOR_TARGET) -c -o crti.o crti.s
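
Editorial aside: taken together, the multilib settings above ask for three builds of libgcc — the default set, a coff/ set compiled with -mcoff, and a pic/ set compiled with -fPIC. The *mcoff*/*fPIC* exception keeps the combined -mcoff -fPIC variant from being built, and the fPIC=fpic match lets objects compiled with -fpic link against the -fPIC libraries.
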
diff --git a/contrib/gcc/config/i386/t-sco5gas b/contrib/gcc/config/i386/t-sco5gas
new file mode 100644
index 0000000..2bca87b
--- /dev/null
+++ b/contrib/gcc/config/i386/t-sco5gas
@@ -0,0 +1,20 @@
+# The pushl in CTOR initialization interferes with frame pointer elimination.
+CRTSTUFF_T_CFLAGS = -fPIC -fno-omit-frame-pointer
+CRTSTUFF_T_CFLAGS_S = -mcoff -fno-omit-frame-pointer
+
+#
+# I am still a little unsure of the multilib architecture. The following
+# 4 lines are based on advice from meissner@cygnus.com.
+#
+MULTILIB_OPTIONS = fPIC
+MULTILIB_DIRNAMES = pic
+MULTILIB_EXCEPTIONS = *fPIC*
+MULTILIB_MATCHES = fPIC=fpic
+MULTILIB_EXTRA_OPTS =
+
+LIBGCC=stmp-multilib
+INSTALL_LIBGCC=install-multilib
+
+crti.o: $(srcdir)/config/i386/sol2-ci.asm $(GCC_PASSES)
+ sed -e '/^!/d' <$(srcdir)/config/i386/sol2-ci.asm >crti.s
+ $(GCC_FOR_TARGET) -c -o crti.o crti.s
diff --git a/contrib/gcc/config/i386/t-sol2 b/contrib/gcc/config/i386/t-sol2
index f79f6ca..5dc59cc 100644
--- a/contrib/gcc/config/i386/t-sol2
+++ b/contrib/gcc/config/i386/t-sol2
@@ -13,20 +13,28 @@ gmon.o: $(srcdir)/config/i386/gmon-sol2.c $(GCC_PASSES) $(CONFIG_H)
# Apparently Sun believes that assembler files don't need comments, because no
# single ASCII character is valid (tried them all). So we manually strip out
# the comments with sed. This bug may only be in the Early Access releases.
-crt1.o: $(srcdir)/config/i386/sol2-c1.asm
+gcrt1.o: $(srcdir)/config/i386/sol2-gc1.asm
+ sed -e '/^!/d' <$(srcdir)/config/i386/sol2-gc1.asm >gcrt1.s
+ $(AS) -o gcrt1.o gcrt1.s
+crt1.o: $(srcdir)/config/i386/sol2-c1.asm $(GCC_PASSES)
sed -e '/^!/d' <$(srcdir)/config/i386/sol2-c1.asm >crt1.s
- $(AS) -o crt1.o crt1.s
-crti.o: $(srcdir)/config/i386/sol2-ci.asm
+ $(GCC_FOR_TARGET) -c -o crt1.o crt1.s
+crti.o: $(srcdir)/config/i386/sol2-ci.asm $(GCC_PASSES)
sed -e '/^!/d' <$(srcdir)/config/i386/sol2-ci.asm >crti.s
- $(AS) -o crti.o crti.s
-crtn.o: $(srcdir)/config/i386/sol2-cn.asm
+ $(GCC_FOR_TARGET) -c -o crti.o crti.s
+crtn.o: $(srcdir)/config/i386/sol2-cn.asm $(GCC_PASSES)
sed -e '/^!/d' <$(srcdir)/config/i386/sol2-cn.asm >crtn.s
- $(AS) -o crtn.o crtn.s
+ $(GCC_FOR_TARGET) -c -o crtn.o crtn.s
# We need to use -fPIC when we are using gcc to compile the routines in
# crtstuff.c. This is only really needed when we are going to use gcc/g++
# to produce a shared library, but since we don't know ahead of time when
# we will be doing that, we just always use -fPIC when compiling the
# routines in crtstuff.c.
+#
+# We must also enable optimization to avoid having any code appear after
+# the call & alignment statement, but before we switch back to the
+# .text section.
-CRTSTUFF_T_CFLAGS = -fPIC
+CRTSTUFF_T_CFLAGS = -fPIC -O2
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/i386/t-winnt b/contrib/gcc/config/i386/t-winnt
index e8e1a0a..1e3557c 100644
--- a/contrib/gcc/config/i386/t-winnt
+++ b/contrib/gcc/config/i386/t-winnt
@@ -1,2 +1,6 @@
winnt.o: $(srcdir)/config/i386/winnt.c
- $(CC) -I. -I$(srcdir) -I$(srcdir)/config -c $(srcdir)/config/i386/winnt.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/i386/winnt.c
+oldnames.o: $(srcdir)/config/winnt/oldnames.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/winnt/oldnames.c
+spawnv.o: $(srcdir)/config/winnt/spawnv.c
+ $(CC) -c $(ALL_CFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $(srcdir)/config/winnt/spawnv.c
diff --git a/contrib/gcc/config/i386/unix.h b/contrib/gcc/config/i386/unix.h
index f38fe27..47440dd 100644
--- a/contrib/gcc/config/i386/unix.h
+++ b/contrib/gcc/config/i386/unix.h
@@ -45,7 +45,9 @@ Boston, MA 02111-1307, USA. */
count is in %cl. Some assemblers require %cl as an argument;
some don't. This macro controls what to do: by default, don't
print %cl. */
-#define AS3_SHIFT_DOUBLE(a,b,c,d) AS2 (a,c,d)
+#define SHIFT_DOUBLE_OMITS_COUNT 1
+#define AS3_SHIFT_DOUBLE(a,b,c,d) \
+ (SHIFT_DOUBLE_OMITS_COUNT ? AS2 (a,c,d) : AS3 (a,b,c,d))
/* Output the size-letter for an opcode.
CODE is the letter used in an operand spec (L, B, W, S or Q).
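
Editorial aside: a stand-alone sketch of the choice SHIFT_DOUBLE_OMITS_COUNT now makes explicit (operand strings illustrative; the real AS2/AS3 macros paste assembler templates rather than printing them) — with the flag set, the %cl count is implied, otherwise it is spelled out as an extra operand:

#include <stdio.h>

#define OMITS_COUNT 1   /* assumed value, standing in for SHIFT_DOUBLE_OMITS_COUNT */

int
main (void)
{
  if (OMITS_COUNT)
    printf ("shldl %%eax,%%edx\n");        /* shift count in %cl left implicit */
  else
    printf ("shldl %%cl,%%eax,%%edx\n");   /* shift count named explicitly */
  return 0;
}
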
@@ -99,7 +101,6 @@ Boston, MA 02111-1307, USA. */
/* String containing the assembler's comment-starter. */
#define ASM_COMMENT_START "/"
-#define COMMENT_BEGIN "/"
/* Output to assembler file text saying following lines
may contain character constants, extra white space, comments, etc. */
@@ -146,3 +147,45 @@ Boston, MA 02111-1307, USA. */
#define FUNCTION_VALUE_REGNO_P(N) \
((N) == 0 || ((N)== FIRST_FLOAT_REG && TARGET_FLOAT_RETURNS_IN_80387))
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ tree parm; \
+ \
+ if (i386_regparm > 0) \
+ parm = TYPE_ARG_TYPES (TREE_TYPE (function)); \
+ else \
+ parm = NULL_TREE; \
+ for (; parm; parm = TREE_CHAIN (parm)) \
+ if (TREE_VALUE (parm) == void_type_node) \
+ break; \
+ fprintf (FILE, "\taddl $%d,%s\n", DELTA, \
+ parm ? "%eax" \
+ : aggregate_value_p (TREE_TYPE (TREE_TYPE (FUNCTION))) ? "8(%esp)" \
+ : "4(%esp)"); \
+ \
+ if (flag_pic) \
+ { \
+ rtx xops[2]; \
+ xops[0] = pic_offset_table_rtx; \
+ xops[1] = (rtx) gen_label_rtx (); \
+ \
+ if (i386_regparm > 2) \
+ abort (); \
+ output_asm_insn ("push%L0 %0", xops); \
+ output_asm_insn (AS1 (call,%P1), xops); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "L", CODE_LABEL_NUMBER (xops[1])); \
+ output_asm_insn (AS1 (pop%L0,%0), xops); \
+ output_asm_insn ("addl $_GLOBAL_OFFSET_TABLE_+[.-%P1],%0", xops); \
+ fprintf (FILE, "\tmovl "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, "@GOT(%%ebx),%%ecx\n\tpopl %%ebx\n\tjmp *%%ecx\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tjmp "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, "\n"); \
+ } \
+} while (0)
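
Editorial aside: conceptually, the thunk emitted by ASM_OUTPUT_MI_THUNK only biases the incoming object pointer by DELTA and transfers control to FUNCTION. A hand-written C analogue, with a made-up DELTA of 8 and an ordinary call standing in for the emitted jmp:

#include <stdio.h>

struct base { char pad[16]; };

static void
real_method (struct base *self)
{
  printf ("object seen at %p\n", (void *) self);
}

/* Thunk for a base subobject that sits 8 bytes into the complete object:
   adjust the first argument, then hand off to the real method.  */
static void
thunk (struct base *self)
{
  real_method ((struct base *) ((char *) self + 8));
}

int
main (void)
{
  struct base obj;
  thunk (&obj);
  return 0;
}
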
diff --git a/contrib/gcc/config/i386/vxi386.h b/contrib/gcc/config/i386/vxi386.h
new file mode 100644
index 0000000..1044286
--- /dev/null
+++ b/contrib/gcc/config/i386/vxi386.h
@@ -0,0 +1,23 @@
+/* Definitions of target machine for GNU compiler. VxWorks i386 version.
+ Copyright (C) 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "i386/i386-aout.h"
+
+#define HANDLE_SYSV_PRAGMA
diff --git a/contrib/gcc/config/i386/winnt.c b/contrib/gcc/config/i386/winnt.c
index 3a7ebf1..0058eb7 100644
--- a/contrib/gcc/config/i386/winnt.c
+++ b/contrib/gcc/config/i386/winnt.c
@@ -1,6 +1,6 @@
/* Subroutines for insn-output.c for Windows NT.
Contributed by Douglas Rupp (drupp@cs.washington.edu)
- Copyright (C) 1995 Free Software Foundation, Inc.
+ Copyright (C) 1995, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -19,8 +19,8 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#include <stdio.h>
#include "config.h"
+#include "system.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
@@ -28,6 +28,342 @@ Boston, MA 02111-1307, USA. */
#include "tree.h"
#include "flags.h"
+/* i386/PE specific attribute support.
+
+ i386/PE has two new attributes:
+ dllexport - for exporting a function/variable that will live in a dll
+ dllimport - for importing a function/variable from a dll
+
+ Microsoft allows multiple declspecs in one __declspec, separating
+ them with spaces. We do NOT support this. Instead, use __declspec
+ multiple times.
+*/
+
+/* Return nonzero if ATTR is a valid attribute for DECL.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR. */
+
+int
+i386_pe_valid_decl_attribute_p (decl, attributes, attr, args)
+ tree decl;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args == NULL_TREE)
+ {
+ if (is_attribute_p ("dllexport", attr))
+ return 1;
+ if (is_attribute_p ("dllimport", attr))
+ return 1;
+ }
+
+ return i386_valid_decl_attribute_p (decl, attributes, attr, args);
+}
+
+/* Return nonzero if ATTR is a valid attribute for TYPE.
+ ATTRIBUTES are any existing attributes and ARGS are the arguments
+ supplied with ATTR. */
+
+int
+i386_pe_valid_type_attribute_p (type, attributes, attr, args)
+ tree type;
+ tree attributes;
+ tree attr;
+ tree args;
+{
+ if (args == NULL_TREE
+ && (TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE))
+ {
+ if (is_attribute_p ("dllexport", attr))
+ return 1;
+ if (is_attribute_p ("dllimport", attr))
+ return 1;
+ }
+
+ return i386_valid_type_attribute_p (type, attributes, attr, args);
+}
+
+/* Merge attributes in decls OLD and NEW.
+
+ This handles the following situation:
+
+ __declspec (dllimport) int foo;
+ int foo;
+
+ The second instance of `foo' nullifies the dllimport. */
+
+tree
+i386_pe_merge_decl_attributes (old, new)
+ tree old, new;
+{
+ tree a;
+ int delete_dllimport_p;
+
+ old = DECL_MACHINE_ATTRIBUTES (old);
+ new = DECL_MACHINE_ATTRIBUTES (new);
+
+ /* What we need to do here is remove from `old' dllimport if it doesn't
+ appear in `new'. dllimport behaves like extern: if a declaration is
+ marked dllimport and a definition appears later, then the object
+ is not dllimport'd. */
+
+ if (lookup_attribute ("dllimport", old) != NULL_TREE
+ && lookup_attribute ("dllimport", new) == NULL_TREE)
+ delete_dllimport_p = 1;
+ else
+ delete_dllimport_p = 0;
+
+ a = merge_attributes (old, new);
+
+ if (delete_dllimport_p)
+ {
+ tree prev,t;
+
+ /* Scan the list for dllimport and delete it. */
+ for (prev = NULL_TREE, t = a; t; prev = t, t = TREE_CHAIN (t))
+ {
+ if (is_attribute_p ("dllimport", TREE_PURPOSE (t)))
+ {
+ if (prev == NULL_TREE)
+ a = TREE_CHAIN (a);
+ else
+ TREE_CHAIN (prev) = TREE_CHAIN (t);
+ break;
+ }
+ }
+ }
+
+ return a;
+}
+
+/* Return the type that we should use to determine if DECL is
+ imported or exported. */
+
+static tree
+associated_type (decl)
+ tree decl;
+{
+ tree t = NULL_TREE;
+
+ /* In the C++ frontend, DECL_CONTEXT for a method doesn't actually refer
+ to the containing class. So we look at the 'this' arg. */
+ if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
+ {
+ /* Artificial methods are not affected by the import/export status of
+ their class unless they are virtual. */
+ if (! DECL_ARTIFICIAL (decl) || DECL_VINDEX (decl))
+ t = TREE_TYPE (TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl))));
+ }
+ else if (DECL_CONTEXT (decl)
+ && TREE_CODE_CLASS (TREE_CODE (DECL_CONTEXT (decl))) == 't')
+ t = DECL_CONTEXT (decl);
+
+ return t;
+}
+
+/* Return non-zero if DECL is a dllexport'd object. */
+
+int
+i386_pe_dllexport_p (decl)
+ tree decl;
+{
+ tree exp;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ exp = lookup_attribute ("dllexport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (exp)
+ return 1;
+
+ /* Class members get the dllexport status of their class. */
+ if (associated_type (decl))
+ {
+ exp = lookup_attribute ("dllexport",
+ TYPE_ATTRIBUTES (associated_type (decl)));
+ if (exp)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return non-zero if DECL is a dllimport'd object. */
+
+int
+i386_pe_dllimport_p (decl)
+ tree decl;
+{
+ tree imp;
+
+ if (TREE_CODE (decl) == FUNCTION_DECL
+ && TARGET_NOP_FUN_DLLIMPORT)
+ return 0;
+
+ if (TREE_CODE (decl) != VAR_DECL
+ && TREE_CODE (decl) != FUNCTION_DECL)
+ return 0;
+ imp = lookup_attribute ("dllimport", DECL_MACHINE_ATTRIBUTES (decl));
+ if (imp)
+ return 1;
+
+ /* Class members get the dllimport status of their class. */
+ if (associated_type (decl))
+ {
+ imp = lookup_attribute ("dllimport",
+ TYPE_ATTRIBUTES (associated_type (decl)));
+ if (imp)
+ return 1;
+ }
+
+ return 0;
+}
+
+/* Return non-zero if SYMBOL is marked as being dllexport'd. */
+
+int
+i386_pe_dllexport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
+}
+
+/* Return non-zero if SYMBOL is marked as being dllimport'd. */
+
+int
+i386_pe_dllimport_name_p (symbol)
+ char *symbol;
+{
+ return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
+}
+
+/* Mark a DECL as being dllexport'd.
+ Note that we override the previous setting (eg: dllimport). */
+
+void
+i386_pe_mark_dllexport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ rtx rtlname;
+ tree idp;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (i386_pe_dllimport_name_p (oldname))
+ oldname += 9;
+ else if (i386_pe_dllexport_name_p (oldname))
+ return; /* already done */
+
+ newname = alloca (strlen (oldname) + 4);
+ sprintf (newname, "@e.%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ idp = get_identifier (newname);
+
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+}
+
+/* Mark a DECL as being dllimport'd. */
+
+void
+i386_pe_mark_dllimport (decl)
+ tree decl;
+{
+ char *oldname, *newname;
+ tree idp;
+ rtx rtlname, newrtl;
+
+ rtlname = XEXP (DECL_RTL (decl), 0);
+ if (GET_CODE (rtlname) == SYMBOL_REF)
+ oldname = XSTR (rtlname, 0);
+ else if (GET_CODE (rtlname) == MEM
+ && GET_CODE (XEXP (rtlname, 0)) == SYMBOL_REF)
+ oldname = XSTR (XEXP (rtlname, 0), 0);
+ else
+ abort ();
+ if (i386_pe_dllexport_name_p (oldname))
+ {
+ error ("`%s' declared as both exported to and imported from a DLL.",
+ IDENTIFIER_POINTER (DECL_NAME (decl)));
+ return;
+ }
+ else if (i386_pe_dllimport_name_p (oldname))
+ {
+ /* Already done, but force correct linkage since the redeclaration
+ might have omitted explicit extern. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+ return;
+ }
+
+ /* ??? One can well ask why we're making these checks here,
+ and that would be a good question. */
+
+ /* Imported variables can't be initialized. Note that C++ classes
+ are marked initial, so we need to check. */
+ if (TREE_CODE (decl) == VAR_DECL
+ && !DECL_VIRTUAL_P (decl)
+ && (DECL_INITIAL (decl)
+ && ! TYPE_NEEDS_CONSTRUCTING (TREE_TYPE (decl))))
+ {
+ error_with_decl (decl, "initialized variable `%s' is marked dllimport");
+ return;
+ }
+ /* Nor can they be static. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl)
+ && 0 /*???*/)
+ {
+ error_with_decl (decl, "static variable `%s' is marked dllimport");
+ return;
+ }
+
+ /* `extern' needn't be specified with dllimport.
+ Specify `extern' now and hope for the best. Sigh. */
+ if (TREE_CODE (decl) == VAR_DECL
+ /* ??? Is this test for vtables needed? */
+ && !DECL_VIRTUAL_P (decl))
+ {
+ DECL_EXTERNAL (decl) = 1;
+ TREE_PUBLIC (decl) = 1;
+ }
+
+ newname = alloca (strlen (oldname) + 11);
+ sprintf (newname, "@i._imp__%s", oldname);
+
+ /* We pass newname through get_identifier to ensure it has a unique
+ address. RTL processing can sometimes peek inside the symbol ref
+ and compare the string's addresses to see if two symbols are
+ identical. */
+ idp = get_identifier (newname);
+
+ newrtl = gen_rtx (MEM, Pmode,
+ gen_rtx (SYMBOL_REF, Pmode,
+ IDENTIFIER_POINTER (idp)));
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ /* Can't treat a pointer to this as a constant address */
+ DECL_NON_ADDR_CONST_P (decl) = 1;
+}
+
/* Return string which is the former assembler name modified with a
suffix consisting of an atsign (@) followed by the number of bytes of
arguments */
@@ -37,6 +373,8 @@ gen_stdcall_suffix (decl)
tree decl;
{
int total = 0;
+ /* ??? This probably should use XSTR (XEXP (DECL_RTL (decl), 0), 0) instead
+ of DECL_ASSEMBLER_NAME. */
char *asmname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
char *newsym;
@@ -44,13 +382,19 @@ gen_stdcall_suffix (decl)
if (TREE_VALUE (tree_last (TYPE_ARG_TYPES (TREE_TYPE (decl))))
== void_type_node)
{
- tree formal_type = TYPE_ARG_TYPES (TREE_TYPE (decl));
+ tree formal_type = TYPE_ARG_TYPES (TREE_TYPE (decl));
- while (TREE_VALUE (formal_type) != void_type_node)
- {
- total += TREE_INT_CST_LOW (TYPE_SIZE (TREE_VALUE (formal_type)));
- formal_type = TREE_CHAIN (formal_type);
- }
+ while (TREE_VALUE (formal_type) != void_type_node)
+ {
+ int parm_size
+ = TREE_INT_CST_LOW (TYPE_SIZE (TREE_VALUE (formal_type)));
+ /* Must round up to include padding. This is done the same
+ way as in store_one_arg. */
+ parm_size = ((parm_size + PARM_BOUNDARY - 1)
+ / PARM_BOUNDARY * PARM_BOUNDARY);
+ total += parm_size;
+ formal_type = TREE_CHAIN (formal_type);
+ }
}
newsym = xmalloc (strlen (asmname) + 10);
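
Editorial aside: a worked example of that rounding as a stand-alone program, assuming a PARM_BOUNDARY of 32 bits and assuming the accumulated bit total is later divided down to bytes for the suffix — for a prototype such as int __stdcall f (char, short, int), each argument occupies a full 4-byte slot, so the decorated name comes out as f@12 rather than f@7:

#include <stdio.h>

#define PARM_BOUNDARY 32   /* bits; assumed value for illustration */

int
main (void)
{
  int arg_bits[] = { 8, 16, 32 };   /* char, short, int */
  int total = 0;
  int i;

  for (i = 0; i < 3; i++)
    {
      int parm_size = arg_bits[i];
      /* Same rounding as in gen_stdcall_suffix above.  */
      parm_size = (parm_size + PARM_BOUNDARY - 1) / PARM_BOUNDARY * PARM_BOUNDARY;
      total += parm_size;
    }

  printf ("f@%d\n", total / 8);   /* 96 bits -> 12 bytes */
  return 0;
}
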
@@ -58,3 +402,169 @@ gen_stdcall_suffix (decl)
return IDENTIFIER_POINTER (get_identifier (newsym));
}
+/* Cover function to implement ENCODE_SECTION_INFO. */
+
+void
+i386_pe_encode_section_info (decl)
+ tree decl;
+{
+ /* This bit is copied from i386.h. */
+ if (optimize > 0 && TREE_CONSTANT (decl)
+ && (!flag_writable_strings || TREE_CODE (decl) != STRING_CST))
+ {
+ rtx rtl = (TREE_CODE_CLASS (TREE_CODE (decl)) != 'd'
+ ? TREE_CST_RTL (decl) : DECL_RTL (decl));
+ SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
+ }
+
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ if (lookup_attribute ("stdcall",
+ TYPE_ATTRIBUTES (TREE_TYPE (decl))))
+ XEXP (DECL_RTL (decl), 0) =
+ gen_rtx (SYMBOL_REF, Pmode, gen_stdcall_suffix (decl));
+
+ /* Mark the decl so we can tell from the rtl whether the object is
+ dllexport'd or dllimport'd. */
+
+ if (i386_pe_dllexport_p (decl))
+ i386_pe_mark_dllexport (decl);
+ else if (i386_pe_dllimport_p (decl))
+ i386_pe_mark_dllimport (decl);
+ /* It might be that DECL has already been marked as dllimport, but a
+ subsequent definition nullified that. The attribute is gone but
+ DECL_RTL still has @i._imp__foo. We need to remove that. Ditto
+ for the DECL_NON_ADDR_CONST_P flag. */
+ else if ((TREE_CODE (decl) == FUNCTION_DECL
+ || TREE_CODE (decl) == VAR_DECL)
+ && DECL_RTL (decl) != NULL_RTX
+ && GET_CODE (DECL_RTL (decl)) == MEM
+ && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
+ && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
+ && i386_pe_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
+ {
+ char *oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
+ tree idp = get_identifier (oldname + 9);
+ rtx newrtl = gen_rtx (SYMBOL_REF, Pmode, IDENTIFIER_POINTER (idp));
+
+ XEXP (DECL_RTL (decl), 0) = newrtl;
+
+ DECL_NON_ADDR_CONST_P (decl) = 0;
+
+ /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
+ We leave these alone for now. */
+ }
+}
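
Editorial aside: the marking scheme used throughout these routines comes down to string prefixes on the assembler name — "@e." for dllexport, "@i._imp__" for dllimport, and a fixed strip of 9 characters (the length of "@i._imp__") when a later definition cancels the import. A stand-alone sketch with plain malloc in place of GCC's identifier table:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *
mark_dllimport (const char *name)
{
  char *marked = malloc (strlen (name) + 10);   /* "@i._imp__" + name + NUL */
  sprintf (marked, "@i._imp__%s", name);
  return marked;
}

static int
dllimport_name_p (const char *symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}

int
main (void)
{
  char *marked = mark_dllimport ("foo");
  printf ("%s\n", marked);        /* @i._imp__foo */
  if (dllimport_name_p (marked))
    printf ("%s\n", marked + 9);  /* the original name again */
  free (marked);
  return 0;
}
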
+
+/* Cover function for UNIQUE_SECTION. */
+
+void
+i386_pe_unique_section (decl, reloc)
+ tree decl;
+ int reloc;
+{
+ int len;
+ char *name,*string,*prefix;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ /* Strip off any encoding in fnname. */
+ STRIP_NAME_ENCODING (name, name);
+
+ /* The object is put in, for example, section .text$foo.
+ The linker will then ultimately place them in .text
+ (everything from the $ on is stripped). Don't put
+ read-only data in .rdata section to avoid a PE linker
+ bug when .rdata$* grouped sections are used in code
+ without a .rdata section. */
+ if (TREE_CODE (decl) == FUNCTION_DECL)
+ prefix = ".text$";
+ else if (DECL_READONLY_SECTION (decl, reloc))
+#ifdef READONLY_DATA_SECTION
+ prefix = ".rdata$";
+#else
+ prefix = ".text$";
+#endif
+ else
+ prefix = ".data$";
+ len = strlen (name) + strlen (prefix);
+ string = alloca (len + 1);
+ sprintf (string, "%s%s", prefix, name);
+
+ DECL_SECTION_NAME (decl) = build_string (len, string);
+}
+
+/* The Microsoft linker requires that every function be marked as
+ DT_FCN. When using gas on cygwin32, we must emit appropriate .type
+ directives. */
+
+#include "gsyms.h"
+
+/* Mark a function appropriately. This should only be called for
+ functions for which we are not emitting COFF debugging information.
+ FILE is the assembler output file, NAME is the name of the
+ function, and PUBLIC is non-zero if the function is globally
+ visible. */
+
+void
+i386_pe_declare_function_type (file, name, public)
+ FILE *file;
+ char *name;
+ int public;
+{
+ fprintf (file, "\t.def\t");
+ assemble_name (file, name);
+ fprintf (file, ";\t.scl\t%d;\t.type\t%d;\t.endef\n",
+ public ? (int) C_EXT : (int) C_STAT,
+ (int) DT_FCN << N_BTSHFT);
+}
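
Editorial aside: for a public function the directive written above comes out as ".def _main; .scl 2; .type 32; .endef". A stand-alone sketch with the usual COFF values written out numerically (C_EXT = 2, C_STAT = 3 and DT_FCN << N_BTSHFT = 32 are assumptions here, not quoted from gsyms.h):

#include <stdio.h>

static void
declare_function_type_demo (FILE *file, const char *name, int is_public)
{
  fprintf (file, "\t.def\t%s;\t.scl\t%d;\t.type\t%d;\t.endef\n",
           name, is_public ? 2 : 3, 32);
}

int
main (void)
{
  declare_function_type_demo (stdout, "_main", 1);
  return 0;
}
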
+
+/* Keep a list of external functions. */
+
+struct extern_list
+{
+ struct extern_list *next;
+ char *name;
+};
+
+static struct extern_list *extern_head;
+
+/* Assemble an external function reference. We need to keep a list of
+ these, so that we can output the function types at the end of the
+ assembly. We can't output the types now, because we might see a
+ definition of the function later on and emit debugging information
+ for it then. */
+
+void
+i386_pe_record_external_function (name)
+ char *name;
+{
+ struct extern_list *p;
+
+ p = (struct extern_list *) permalloc (sizeof *p);
+ p->next = extern_head;
+ p->name = name;
+ extern_head = p;
+}
+
+/* This is called at the end of assembly. For each external function
+ which has not been defined, we output a declaration now. */
+
+void
+i386_pe_asm_file_end (file)
+ FILE *file;
+{
+ struct extern_list *p;
+
+ for (p = extern_head; p != NULL; p = p->next)
+ {
+ tree decl;
+
+ decl = get_identifier (p->name);
+
+ /* Positively ensure only one declaration for any given symbol. */
+ if (! TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (decl))
+ {
+ TREE_ASM_WRITTEN (decl) = 1;
+ i386_pe_declare_function_type (file, p->name, TREE_PUBLIC (decl));
+ }
+ }
+}
diff --git a/contrib/gcc/config/i386/x-dgux b/contrib/gcc/config/i386/x-dgux
new file mode 100644
index 0000000..322bfe3
--- /dev/null
+++ b/contrib/gcc/config/i386/x-dgux
@@ -0,0 +1,11 @@
+#
+# host is ix86 running dgux
+#
+CC = /bin/gcc
+X_CFLAGS = -O -mstandard -mlegend
+BOOT_CFLAGS = -O2 -g -mstandard -mlegend $(CFLAGS)
+CLIB = -lw32
+RANLIB = true
+USER_H = $(EXTRA_HEADERS) $(LANG_EXTRA_HEADERS)
+STMP_FIXPROTO =
+
diff --git a/contrib/gcc/config/i386/x-osf1elf b/contrib/gcc/config/i386/x-osf1elf
new file mode 100644
index 0000000..1467381
--- /dev/null
+++ b/contrib/gcc/config/i386/x-osf1elf
@@ -0,0 +1,8 @@
+# Defaults for OSF/1 1.3+
+CC = $(OLDCC)
+CLIB = -lld
+INSTALL = installbsd -c
+OLDCC = /usr/ccs/gcc/gcc
+X_CFLAGS = -static
+
+# FIXPROTO_DEFINES = -D_XOPEN_SOURCE
diff --git a/contrib/gcc/config/i386/x-osfrose b/contrib/gcc/config/i386/x-osfrose
index a419bdb..2c5e3ba 100644
--- a/contrib/gcc/config/i386/x-osfrose
+++ b/contrib/gcc/config/i386/x-osfrose
@@ -7,8 +7,9 @@
BUILD =
CC = $(OLDCC)
CLIB = -lld
-X_CFLAGS = $(DEB_OPT) $(MSTATS) $(SHLIB) $(X_DEFINES)
-X_CFLAGS_NODEBUG = $(NO_DEBUG) $(MSTATS) $(OPT) $(PROFILE) $(SHLIB) $(X_DEFINES) $(XCFLAGS)
+X_CFLAGS = $(DEB_OPT) $(MSTATS) $(X_DEFINES)
+X_CFLAGS_NODEBUG = $(NO_DEBUG) $(MSTATS) $(OPT) $(PROFILE) $(X_DEFINES) $(XCFLAGS)
+XCFLAGS = $(SHLIB)
CPP_ABORT = # -Dabort=fancy_abort
CPPFLAGS = $(CPP_ABORT) $(SYSTEM_INCLUDES)
DEB_OPT = $(OPT) $(DEBUG) $(PROFILE)
@@ -16,7 +17,6 @@ DEBUG =
DEBUG_COLLECT = # -DDEBUG
CCLIBFLAGS = -O -DNO_HALF_PIC
GCC_CFLAGS = $(INTERNAL_CFLAGS) $(X_CFLAGS) $(T_CFLAGS) $(CFLAGS) -B./ -DPOSIX -DNO_HALF_PIC
-INSTALL = installbsd -c
LDFLAGS =
MSTATS = # -mstats
OLDCC = /usr/ccs/gcc/gcc
@@ -25,7 +25,3 @@ PROFILE =
SHLIB = -pic-none
SYSTEM_INCLUDES = # -I${BUILD}/usr/include
X_DEFINES = -Dvfork=fork
-
-libdir = /usr/ccs
-mandir = /usr/ccs/gcc/$(target)/$(version)
-bindir = /usr/ccs/gcc/$(target)/$(version)
diff --git a/contrib/gcc/config/i386/x-sco5 b/contrib/gcc/config/i386/x-sco5
new file mode 100644
index 0000000..e13ed74
--- /dev/null
+++ b/contrib/gcc/config/i386/x-sco5
@@ -0,0 +1,10 @@
+RANLIB = :
+RANLIB_TEST = false
+CC = cc
+OLDCC = cc
+CCLIBFLAGS =
+# We avoid the ALLOCA in -lPW because it gives us an evil index()
+ALLOCA = alloca.o
+
+# See all the declarations.
+FIXPROTO_DEFINES = -D_XOPEN_SOURCE -D_POSIX_C_SOURCE=2
diff --git a/contrib/gcc/config/i386/xm-aix.h b/contrib/gcc/config/i386/xm-aix.h
index 5e5d402..4cbd36e 100644
--- a/contrib/gcc/config/i386/xm-aix.h
+++ b/contrib/gcc/config/i386/xm-aix.h
@@ -1,37 +1,2 @@
-/* Configuration for GNU C-compiler for IBM PS/2 running AIX/386.
- Copyright (C) 1988, 1993 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
-#define USG
-
#undef TRUE
#undef FALSE
-
-#include "i386/xm-i386.h"
-
-#define bcopy(a,b,c) memcpy (b,a,c)
-#define bzero(a,b) memset (a,0,b)
-#define bcmp(a,b,c) memcmp (a,b,c)
-
-/* If not compiled with GNU C, use the portable alloca. */
-#ifndef __GNUC__
-#define USE_C_ALLOCA
-#endif
-
-#define HAVE_PUTENV
diff --git a/contrib/gcc/config/i386/xm-bsd386.h b/contrib/gcc/config/i386/xm-bsd386.h
index 9deb7ef..6b8eee7 100644
--- a/contrib/gcc/config/i386/xm-bsd386.h
+++ b/contrib/gcc/config/i386/xm-bsd386.h
@@ -1,6 +1,3 @@
/* Configuration for GCC for Intel i386 running BSDI's BSD/386 as host. */
#include "i386/xm-i386.h"
-
-#define HAVE_STRERROR
-
diff --git a/contrib/gcc/config/i386/xm-dgux.h b/contrib/gcc/config/i386/xm-dgux.h
new file mode 100644
index 0000000..5bdb9be
--- /dev/null
+++ b/contrib/gcc/config/i386/xm-dgux.h
@@ -0,0 +1,12 @@
+
+/* Configuration for GCC for Intel i386 running DG/ux */
+
+/* looks just like sysv4 for now */
+
+#include "i386/xm-i386.h"
+#include "xm-svr4.h"
+
+/* If not compiled with GNU C, use the portable alloca. */
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
diff --git a/contrib/gcc/config/i386/xm-dos.h b/contrib/gcc/config/i386/xm-dos.h
index 1dd0c01..a734a81 100644
--- a/contrib/gcc/config/i386/xm-dos.h
+++ b/contrib/gcc/config/i386/xm-dos.h
@@ -1,8 +1,5 @@
#include "i386/xm-i386.h"
-/* Inhibit cccp.c's definition of putenv. */
-#define HAVE_PUTENV
-
/* Use semicolons to separate elements of a path. */
#define PATH_SEPARATOR ';'
@@ -15,6 +12,3 @@
#define MKTEMP_EACH_FILE 1
#define NO_PRECOMPILES 1
-
-/* sys_errlist proto in cccp.c doesn't match djgpp */
-#define HAVE_STRERROR
diff --git a/contrib/gcc/config/i386/xm-isc.h b/contrib/gcc/config/i386/xm-isc.h
index 7a0a47c..e686c5e 100644
--- a/contrib/gcc/config/i386/xm-isc.h
+++ b/contrib/gcc/config/i386/xm-isc.h
@@ -1,5 +1,3 @@
-#include "i386/xm-sysv3.h"
-
#ifndef REAL_ARITHMETIC
#define REAL_VALUE_ATOF(x, mode) strtod ((x), (char **)0)
extern double strtod ();
diff --git a/contrib/gcc/config/i386/xm-linux.h b/contrib/gcc/config/i386/xm-linux.h
index 42f097d..713bf3b2 100644
--- a/contrib/gcc/config/i386/xm-linux.h
+++ b/contrib/gcc/config/i386/xm-linux.h
@@ -1,5 +1,5 @@
-/* Configuration for GCC for Intel i386 running Linux.
- Copyright (C) 1993, 1994, 1995 Free Software Foundation, Inc.
+/* Configuration for GCC for Intel i386 running Linux-based GNU systems.
+ Copyright (C) 1993, 1994, 1995, 1997 Free Software Foundation, Inc.
Contributed by H.J. Lu (hjl@nynexst.com)
This file is part of GNU CC.
diff --git a/contrib/gcc/config/i386/xm-mingw32.h b/contrib/gcc/config/i386/xm-mingw32.h
new file mode 100644
index 0000000..d818142
--- /dev/null
+++ b/contrib/gcc/config/i386/xm-mingw32.h
@@ -0,0 +1,42 @@
+/* Configuration for GNU C-compiler for hosting on Windows32.
+ using GNU tools and the Windows32 API Library.
+ Copyright (C) 1997, 1998 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef USG
+#define USG 1
+#endif
+
+#ifndef ONLY_INT_FIELD
+#define ONLY_INT_FIELDS 1
+#endif
+
+#ifndef USE_PROTOTYPES
+#define USE_PROTOTYPES 1
+#endif
+
+#define NO_SYS_SIGLIST 1
+#define environ _environ
+
+/* Even though we support "/", allow "\" since everybody tests both. */
+#define DIR_SEPARATOR '\\'
+#define EXECUTABLE_SUFFIX ".exe"
+
+#undef PATH_SEPARATOR
+#define PATH_SEPARATOR ';'
diff --git a/contrib/gcc/config/i386/xm-openbsd.h b/contrib/gcc/config/i386/xm-openbsd.h
new file mode 100644
index 0000000..1a79e83
--- /dev/null
+++ b/contrib/gcc/config/i386/xm-openbsd.h
@@ -0,0 +1,23 @@
+/* Configuration file for i386 hosts running OpenBSD.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-openbsd.h>
+#include <i386/xm-i386.h>
+
diff --git a/contrib/gcc/config/i386/xm-os2.h b/contrib/gcc/config/i386/xm-os2.h
index 5ff2899..aed925e 100644
--- a/contrib/gcc/config/i386/xm-os2.h
+++ b/contrib/gcc/config/i386/xm-os2.h
@@ -1,7 +1,7 @@
/* Configuration for GNU compiler
for an Intel i386 or later processor running OS/2 2.x.
- Copyright (C) 1993, 1994, 1995 Free Software Foundation, Inc.
- Contributed by Samuel Figueroa (figueroa@cs.nyu.edu)
+ Copyright (C) 1993, 1994, 1995, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Samuel Figueroa (figueroa@apple.com)
This file is part of GNU CC.
@@ -28,20 +28,24 @@ Boston, MA 02111-1307, USA. */
#include <stdlib.h> /* this defines alloca */
#define USG
#define ONLY_INT_FIELDS
-#define HAVE_PUTENV
#define USE_PROTOTYPES 1
-#define bcmp(a,b,c) memcmp (a,b,c)
-#define bcopy(a,b,c) memcpy (b,a,c)
-#define bzero(a,b) memset (a,0,b)
-#define index strchr
-#define rindex strrchr
#define strcasecmp stricmp
#define kill(a,b) raise(b)
#define mktemp tmpnam
#else
+#ifdef __EMX__
+#define EMX
+#define USG
+#define BSTRING
+#define HAVE_PUTENV
+#define HAVE_VPRINTF
+#define HAVE_STRERROR
+#define strcasecmp stricmp
+#else
#define ____386BSD____
int spawnv (int modeflag, char *path, char *argv[]);
int spawnvp (int modeflag, char *path, char *argv[]);
+#endif /* __EMX__ */
#endif /* __IBMC__ */
#ifndef PATH_SEPARATOR
@@ -52,6 +56,14 @@ int spawnvp (int modeflag, char *path, char *argv[]);
#endif
#define EXECUTABLE_SUFFIX ".exe"
+
+/* The EMX compiler uses regular .o files */
+#ifndef __EMX__
#define OBJECT_SUFFIX ".obj"
+#endif
+
+/* This is required to make temporary file names unique on file
+ systems which severely restrict the length of file names. */
+#define MKTEMP_EACH_FILE
#include "i386/xm-i386.h"
diff --git a/contrib/gcc/config/i386/xm-osf.h b/contrib/gcc/config/i386/xm-osf.h
index fda50d9..4cbd36e 100644
--- a/contrib/gcc/config/i386/xm-osf.h
+++ b/contrib/gcc/config/i386/xm-osf.h
@@ -1,32 +1,2 @@
-/* Configuration for GNU C-compiler for 386 running OSF/1
- Copyright (C) 1994 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
#undef TRUE
#undef FALSE
-
-#include "i386/xm-i386.h"
-
-#define bcopy(a,b,c) memcpy (b,a,c)
-#define bzero(a,b) memset (a,0,b)
-#define bcmp(a,b,c) memcmp (a,b,c)
-
-#define HAVE_PUTENV
-#define HAVE_VPRINTF
-
diff --git a/contrib/gcc/config/i386/xm-osf1elf.h b/contrib/gcc/config/i386/xm-osf1elf.h
new file mode 100644
index 0000000..69ca9c1
--- /dev/null
+++ b/contrib/gcc/config/i386/xm-osf1elf.h
@@ -0,0 +1,6 @@
+/* Configuration for GCC for Intel i386 running OSF/1 1.3. */
+
+#ifndef HZ
+#include <machine/machtime.h>
+#define HZ DEFAULT_CLK_TCK
+#endif
diff --git a/contrib/gcc/config/i386/xm-sco.h b/contrib/gcc/config/i386/xm-sco.h
index 01a63d90..ad63449 100644
--- a/contrib/gcc/config/i386/xm-sco.h
+++ b/contrib/gcc/config/i386/xm-sco.h
@@ -1,18 +1,9 @@
/* Configuration for GCC for Intel i386 running SCO. */
-#include "i386/xm-sysv3.h"
-
-/* On SCO 3.2.1, ldexp rejects values outside [0.5, 1). */
-
-#define BROKEN_LDEXP
-
/* Big buffers improve performance. */
#define IO_BUFFER_SIZE (0x8000 - 1024)
-/* SCO has a very small ARG_MAX. */
-#define SMALL_ARG_MAX
-
#ifndef __GNUC__
/* The SCO compiler gets it wrong, and treats enumerated bitfields
as signed quantities, making it impossible to use an 8-bit enum
diff --git a/contrib/gcc/config/i386/xm-sco5.h b/contrib/gcc/config/i386/xm-sco5.h
new file mode 100644
index 0000000..6b22b1d
--- /dev/null
+++ b/contrib/gcc/config/i386/xm-sco5.h
@@ -0,0 +1,7 @@
+/* Configuration for GCC for Intel i386 running SCO. */
+
+/* Big buffers improve performance. */
+
+#define IO_BUFFER_SIZE (0x8000 - 1024)
+
+
diff --git a/contrib/gcc/config/i386/xm-sun.h b/contrib/gcc/config/i386/xm-sun.h
index d2e714e..de7c201 100644
--- a/contrib/gcc/config/i386/xm-sun.h
+++ b/contrib/gcc/config/i386/xm-sun.h
@@ -1,5 +1,5 @@
/* Configuration for GNU C-compiler for Intel 80386 running SunOS 4.0.
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -21,7 +21,3 @@ Boston, MA 02111-1307, USA. */
#define USG
#include "i386/xm-i386.h"
-
-#define bcopy(a,b,c) memcpy (b,a,c)
-#define bzero(a,b) memset (a,0,b)
-#define bcmp(a,b,c) memcmp (a,b,c)
diff --git a/contrib/gcc/config/i386/xm-sysv4.h b/contrib/gcc/config/i386/xm-sysv4.h
index 49d52b4..1365064 100644
--- a/contrib/gcc/config/i386/xm-sysv4.h
+++ b/contrib/gcc/config/i386/xm-sysv4.h
@@ -1,16 +1,5 @@
/* Configuration for GCC for Intel i386 running System V Release 4. */
-#include "i386/xm-i386.h"
-#include "xm-svr4.h"
-
-/* If not compiled with GNU C, use the portable alloca. */
-#ifndef __GNUC__
-#define USE_C_ALLOCA
-#endif
#ifdef __HIGHC__
#include <alloca.h> /* for MetaWare High-C on NCR System 3000 */
#endif
-
-/* Univel, at least, has a small ARG_MAX. Defining this is harmless
- except for causing extra stat calls in the driver program. */
-#define SMALL_ARG_MAX
diff --git a/contrib/gcc/config/i386/xm-vsta.h b/contrib/gcc/config/i386/xm-vsta.h
index bb333ae..735d1d5 100644
--- a/contrib/gcc/config/i386/xm-vsta.h
+++ b/contrib/gcc/config/i386/xm-vsta.h
@@ -1,26 +1,2 @@
-/* Configuration for GNU C-compiler for Intel 80386.
- Copyright (C) 1994 Free Software Foundation, Inc.
-
-This file is part of GNU CC.
-
-GNU CC is free software; you can redistribute it and/or modify
-it under the terms of the GNU General Public License as published by
-the Free Software Foundation; either version 2, or (at your option)
-any later version.
-
-GNU CC is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU General Public License
-along with GNU CC; see the file COPYING. If not, write to
-the Free Software Foundation, 59 Temple Place - Suite 330,
-Boston, MA 02111-1307, USA. */
-
-#define NO_STAB_H
-
-#include "i386/xm-i386.h"
-
/* Use semicolons to separate elements of a path. */
#define PATH_SEPARATOR ';'
diff --git a/contrib/gcc/config/libgloss.h b/contrib/gcc/config/libgloss.h
new file mode 100644
index 0000000..2f2ba56
--- /dev/null
+++ b/contrib/gcc/config/libgloss.h
@@ -0,0 +1,35 @@
+/* libgloss.h -- operating system specific defines to be used when
+ targeting GCC for Libgloss supported targets.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* The libgloss standard for crt0.s has the name based on the command line
+ option. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared:%{pg:pgcrt0%O%s}%{!pg:%{p:pcrt0%O%s}%{!p:crt0%O%s}}}"
+
+/* This file used to force LINK_SPEC to be the null string, but that is not
+ correct. LINK_SPEC is used to pass machine specific arguments to the
+ linker and hence can not be redefined here. LINK_SPEC is never used to
+ specify startup files or libraries, so it should never conflict with
+ libgloss. */
+
+/* Don't set the target flags, this is done by the linker script */
+#undef LIB_SPEC
+#define LIB_SPEC ""
diff --git a/contrib/gcc/config/linux-aout.h b/contrib/gcc/config/linux-aout.h
index 29fb8e9..ca8a39d 100644
--- a/contrib/gcc/config/linux-aout.h
+++ b/contrib/gcc/config/linux-aout.h
@@ -1,5 +1,5 @@
-/* Definitions for Linux
- Copyright (C) 1995 Free Software Foundation, Inc.
+/* Definitions for Linux-based GNU systems.
+ Copyright (C) 1995, 1997 Free Software Foundation, Inc.
Contributed by H.J. Lu (hjl@nynexst.com)
This file is part of GNU CC.
@@ -25,7 +25,7 @@ Boston, MA 02111-1307, USA. */
#undef HAVE_ATEXIT
#define HAVE_ATEXIT
-/* Linux uses ctype from glibc.a. I am not sure how complete it is.
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
For now, we play safe. It may change later. */
#if 0
@@ -36,16 +36,13 @@ Boston, MA 02111-1307, USA. */
#undef STARTFILE_SPEC
#define STARTFILE_SPEC "%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
#undef ASM_APP_OFF
#define ASM_APP_OFF "#NO_APP\n"
+#define SET_ASM_OP ".set"
+
/* We need that too. */
#define HANDLE_SYSV_PRAGMA
diff --git a/contrib/gcc/config/linux.h b/contrib/gcc/config/linux.h
index c82cfe2..b619d01 100644
--- a/contrib/gcc/config/linux.h
+++ b/contrib/gcc/config/linux.h
@@ -1,7 +1,7 @@
-/* Definitions for Linux with ELF format
- Copyright (C) 1995 Free Software Foundation, Inc.
+/* Definitions for Linux-based GNU systems with ELF format
+ Copyright (C) 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
Contributed by Eric Youngdale.
- Modified for stabs-in-ELF by H.J. Lu.
+ Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org).
This file is part of GNU CC.
@@ -26,7 +26,7 @@ Boston, MA 02111-1307, USA. */
#undef HAVE_ATEXIT
#define HAVE_ATEXIT
-/* Linux uses ctype from glibc.a. I am not sure how complete it is.
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
For now, we play safe. It may change later. */
#if 0
@@ -34,18 +34,16 @@ Boston, MA 02111-1307, USA. */
#define MULTIBYTE_CHARS 1
#endif
-/* There are conflicting reports about whether this system uses
- a different assembler syntax. wilson@cygnus.com says # is right. */
-#undef COMMENT_BEGIN
-#define COMMENT_BEGIN "#"
-
#undef ASM_APP_ON
#define ASM_APP_ON "#APP\n"
#undef ASM_APP_OFF
#define ASM_APP_OFF "#NO_APP\n"
+#define SET_ASM_OP ".set"
+
/* Use stabs instead of DWARF debug format. */
+#undef PREFERRED_DEBUGGING_TYPE
#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
#include "svr4.h"
@@ -61,28 +59,57 @@ Boston, MA 02111-1307, USA. */
fprintf (FILE, "\t.version\t\"01.01\"\n"); \
} while (0)
-#undef LIBGCC_SPEC
-#define LIBGCC_SPEC \
- "%{!shared:-lgcc}"
-
-
-/* Provide a STARTFILE_SPEC appropriate for Linux. Here we add
- the Linux magical crtbegin.o file (see crtstuff.c) which
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
provides part of the support for getting C++ file-scope static
object constructed before entering `main'. */
#undef STARTFILE_SPEC
#define STARTFILE_SPEC \
"%{!shared: \
- %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} \
+ %{!p:%{profile:gcrt1.o%s} \
+ %{!profile:crt1.o%s}}}} \
crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
-/* Provide a ENDFILE_SPEC appropriate for Linux. Here we tack on
- the Linux magical crtend.o file (see crtstuff.c) which
+/* Provide a ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
provides part of the support for getting C++ file-scope static
object constructed before entering `main', followed by a normal
- Linux "finalizer" file, `crtn.o'. */
+ GNU/Linux "finalizer" file, `crtn.o'. */
#undef ENDFILE_SPEC
#define ENDFILE_SPEC \
"%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+/* This is for -profile to use -lc_p instead of -lc. */
+#ifndef CC1_SPEC
+#define CC1_SPEC "%{profile:-p}"
+#endif
+
+#ifndef USE_GNULIBC_1
+#undef DEFAULT_VTABLE_THUNKS
+#define DEFAULT_VTABLE_THUNKS 1
+#endif
+
+#undef LIB_SPEC
+/* We no longer link with libc_p.a or libg.a by default. If you
+ want to profile or debug the GNU/Linux C library, please add
+ -profile or -ggdb to LDFLAGS at the link time, respectively. */
+#if 1
+#ifdef USE_GNULIBC_1
+#define LIB_SPEC \
+ "%{!shared: %{p:-lgmon} %{pg:-lgmon} %{profile:-lgmon -lc_p} \
+ %{!profile:%{!ggdb:-lc} %{ggdb:-lg}}}"
+#else
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+#endif
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
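
Editorial aside, as a worked example of these specs: a plain `gcc hello.c` link (not -shared, without USE_GNULIBC_1) pulls in crt1.o, crti.o and crtbegin.o up front, -lc from LIB_SPEC, and crtend.o plus crtn.o at the end; adding -profile instead selects gcrt1.o as the startfile, passes -p to cc1 through CC1_SPEC, and substitutes -lc_p for -lc.
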
diff --git a/contrib/gcc/config/lynx.h b/contrib/gcc/config/lynx.h
index 04919d4..62c790b 100644
--- a/contrib/gcc/config/lynx.h
+++ b/contrib/gcc/config/lynx.h
@@ -1,5 +1,5 @@
/* Target independent definitions for LynxOS.
- Copyright (C) 1993, 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1993, 1994, 1995, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -133,12 +133,11 @@ do { \
#undef INIT_SECTION_ASM_OP
#undef EXTRA_SECTIONS
-#define EXTRA_SECTIONS in_const, in_bss, in_ctors, in_dtors, in_fini,
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors, in_fini
#undef EXTRA_SECTION_FUNCTIONS
#define EXTRA_SECTION_FUNCTIONS \
CONST_SECTION_FUNCTION \
- BSS_SECTION_FUNCTION \
CTORS_SECTION_FUNCTION \
DTORS_SECTION_FUNCTION \
FINI_SECTION_FUNCTION
diff --git a/contrib/gcc/config/netbsd.h b/contrib/gcc/config/netbsd.h
index 8c0974a..0fb4d40 100644
--- a/contrib/gcc/config/netbsd.h
+++ b/contrib/gcc/config/netbsd.h
@@ -13,11 +13,11 @@
#define GCC_INCLUDE_DIR "/usr/include"
#undef INCLUDE_DEFAULTS
-#define INCLUDE_DEFAULTS \
- { \
- { GPLUSPLUS_INCLUDE_DIR, 1, 1 }, \
- { GCC_INCLUDE_DIR, 0, 0 }, \
- { 0, 0, 0 } \
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
+ { 0, 0, 0, 0 } \
}
/* Under NetBSD, the normal location of the compiler back ends is the
@@ -58,8 +58,13 @@
#undef LINK_SPEC
#define LINK_SPEC \
- "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{static:-Bstatic} %{assert*}"
+ "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{R*} %{static:-Bstatic} %{assert*}"
+/* This defines which switch letters take arguments. */
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'R')
/* We have atexit(3). */
@@ -68,28 +73,36 @@
/* Implicit library calls should use memcpy, not bcopy, etc. */
#define TARGET_MEM_FUNCTIONS
+
+/* Handle #pragma weak and #pragma pack. */
+
+#define HANDLE_SYSV_PRAGMA
/*
* Some imports from svr4.h in support of shared libraries.
* Currently, we need the DECLARE_OBJECT_SIZE stuff.
*/
-/* Define the strings used for the special svr4 .type and .size directives.
- These strings generally do not vary from one system running svr4 to
- another, but if a given system (e.g. m88k running svr) needs to use
- different pseudo-op names for these, they may be overridden in the
- file which includes this one. */
+/* Define the strings used for the .type, .size, and .set directives.
+ These strings generally do not vary from one system running netbsd
+ to another, but if a given system needs to use different pseudo-op
+ names for these, they may be overridden in the file which includes
+ this one. */
#undef TYPE_ASM_OP
#undef SIZE_ASM_OP
+#undef SET_ASM_OP
#define TYPE_ASM_OP ".type"
#define SIZE_ASM_OP ".size"
+#define SET_ASM_OP ".set"
/* This is how we tell the assembler that a symbol is weak. */
#undef ASM_WEAKEN_LABEL
#define ASM_WEAKEN_LABEL(FILE,NAME) \
- do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ do { fputs ("\t.globl\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); \
+ fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
fputc ('\n', FILE); } while (0)
/* The following macro defines the format used to output the second
diff --git a/contrib/gcc/config/nextstep.c b/contrib/gcc/config/nextstep.c
index 823bcee..e909a94 100644
--- a/contrib/gcc/config/nextstep.c
+++ b/contrib/gcc/config/nextstep.c
@@ -1,5 +1,5 @@
/* Functions for generic NeXT as target machine for GNU C compiler.
- Copyright (C) 1989, 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
+ Copyright (C) 1989, 90-93, 96, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -18,6 +18,11 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
+#include "config.h"
+#include <stdio.h>
+#include "flags.h"
+#include "tree.h"
+
/* Make everything that used to go in the text section really go there. */
int flag_no_mach_text_sections = 0;
@@ -35,14 +40,17 @@ static int initial_optimize_flag;
extern char *get_directive_line ();
/* Called from check_newline via the macro HANDLE_PRAGMA.
- FINPUT is the source file input stream. */
+ FINPUT is the source file input stream.
+ CH is the first character after `#pragma'.
+ The result is 1 if the pragma was handled. */
-void
-handle_pragma (finput, get_line_function)
+int
+handle_pragma (finput, node)
FILE *finput;
- char *(*get_line_function) ();
+ tree node;
{
- register char *p = (*get_line_function) (finput);
+ int retval = 0;
+ register char *pname;
/* Record initial setting of optimize flag, so we can restore it. */
if (!pragma_initialized)
@@ -51,17 +59,24 @@ handle_pragma (finput, get_line_function)
initial_optimize_flag = optimize;
}
- if (OPT_STRCMP ("CC_OPT_ON"))
+ if (TREE_CODE (node) != IDENTIFIER_NODE)
+ return 0;
+
+ pname = IDENTIFIER_POINTER (node);
+
+ if (strcmp (pname, "CC_OPT_ON") == 0)
{
optimize = 1, obey_regdecls = 0;
warning ("optimization turned on");
+ retval = 1;
}
- else if (OPT_STRCMP ("CC_OPT_OFF"))
+ else if (strcmp (pname, "CC_OPT_OFF") == 0)
{
optimize = 0, obey_regdecls = 1;
warning ("optimization turned off");
+ retval = 1;
}
- else if (OPT_STRCMP ("CC_OPT_RESTORE"))
+ else if (strcmp (pname, "CC_OPT_RESTORE") == 0)
{
extern int initial_optimize_flag;
@@ -74,11 +89,14 @@ handle_pragma (finput, get_line_function)
optimize = initial_optimize_flag;
}
warning ("optimization level restored");
+ retval = 1;
}
- else if (OPT_STRCMP ("CC_WRITABLE_STRINGS"))
- flag_writable_strings = 1;
- else if (OPT_STRCMP ("CC_NON_WRITABLE_STRINGS"))
- flag_writable_strings = 0;
- else if (OPT_STRCMP ("CC_NO_MACH_TEXT_SECTIONS"))
- flag_no_mach_text_sections = 1;
+ else if (strcmp (pname, "CC_WRITABLE_STRINGS") == 0)
+ flag_writable_strings = retval = 1;
+ else if (strcmp (pname, "CC_NON_WRITABLE_STRINGS") == 0)
+ flag_writable_strings = 0, retval = 1;
+ else if (strcmp (pname, "CC_NO_MACH_TEXT_SECTIONS") == 0)
+ flag_no_mach_text_sections = retval = 1;
+
+ return retval;
}
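
Editorial aside: as a usage sketch, the pragmas recognized above appear in a source file like this; compilers that do not handle them simply ignore the unknown pragmas, so the snippet builds anywhere, while a NeXT-configured GCC toggles optimization around the function:

#include <stdio.h>

#pragma CC_OPT_OFF
static int
debug_me (int x)
{
  return x + x;
}
#pragma CC_OPT_RESTORE

int
main (void)
{
  printf ("%d\n", debug_me (21));
  return 0;
}
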
diff --git a/contrib/gcc/config/nextstep.h b/contrib/gcc/config/nextstep.h
index 6e2e986..96435fc 100644
--- a/contrib/gcc/config/nextstep.h
+++ b/contrib/gcc/config/nextstep.h
@@ -1,6 +1,6 @@
-/* nextstep.h -- operating system specific defines to be used when
- targeting GCC for NeXTSTEP.
- Copyright (C) 1989, 1990, 1991, 1992, 1993 Free Software Foundation, Inc.
+/* Operating system specific defines to be used when targeting GCC
+ for NeXTSTEP.
+ Copyright (C) 1989, 90-93, 1996, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -27,26 +27,43 @@ Boston, MA 02111-1307, USA. */
#undef INCLUDE_DEFAULTS
#define INCLUDE_DEFAULTS \
{ \
- { GPLUSPLUS_INCLUDE_DIR, 1, 1 }, \
- { LOCAL_INCLUDE_DIR, 0, 1 }, \
- { TOOL_INCLUDE_DIR, 0, 1 }, \
- { GCC_INCLUDE_DIR, 0, 0 }, \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { LOCAL_INCLUDE_DIR, 0, 0, 1 }, \
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
/* These are for fixincludes-fixed ansi/bsd headers \
which wouldn't be found otherwise. \
(The use of string catenation here is OK since \
NeXT's native compiler is derived from GCC.) */ \
- { GCC_INCLUDE_DIR "/ansi", 0, 0 }, \
- { GCC_INCLUDE_DIR "/bsd", 0, 0 }, \
- { "/NextDeveloper/Headers", 0, 0 }, \
- { "/NextDeveloper/Headers/ansi", 0, 0 }, \
- { "/NextDeveloper/Headers/bsd", 0, 0 }, \
- { "/LocalDeveloper/Headers", 0, 0 }, \
- { "/LocalDeveloper/Headers/ansi", 0, 0 }, \
- { "/LocalDeveloper/Headers/bsd", 0, 0 }, \
- { "/NextDeveloper/2.0CompatibleHeaders", 0, 0 }, \
- { STANDARD_INCLUDE_DIR, 0, 0 }, \
- { "/usr/include/bsd", 0, 0 }, \
- { 0, 0, 0 } \
+ { GCC_INCLUDE_DIR "/ansi", 0, 0, 0 }, \
+ { GCC_INCLUDE_DIR "/bsd", 0, 0, 0 }, \
+ { "/NextDeveloper/Headers", 0, 0, 0 }, \
+ { "/NextDeveloper/Headers/ansi", 0, 0, 0 }, \
+ { "/NextDeveloper/Headers/bsd", 0, 0, 0 }, \
+ { "/LocalDeveloper/Headers", 0, 0, 0 }, \
+ { "/LocalDeveloper/Headers/ansi", 0, 0, 0 }, \
+ { "/LocalDeveloper/Headers/bsd", 0, 0, 0 }, \
+ { "/NextDeveloper/2.0CompatibleHeaders", 0, 0, 0 }, \
+ { STANDARD_INCLUDE_DIR, 0, 0, 0 }, \
+ { "/usr/include/bsd", 0, 0, 0 }, \
+ { 0, 0, 0, 0 } \
+ }
+#else /* CROSS_COMPILE */
+#undef INCLUDE_DEFAULTS
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GPLUSPLUS_INCLUDE_DIR, 0, 1, 1 }, \
+ { LOCAL_INCLUDE_DIR, 0, 0, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
+ { GCC_INCLUDE_DIR "/ansi", 0, 0, 0 }, \
+ { GCC_INCLUDE_DIR "/bsd", 0, 0, 0 }, \
+ { TOOL_INCLUDE_DIR, "BINUTILS", 0, 1 }, \
+ { TOOL_INCLUDE_DIR "/ansi", 0, 0, 0 }, \
+ { TOOL_INCLUDE_DIR "/bsd", 0, 0, 0 }, \
+ { STANDARD_INCLUDE_DIR, 0, 0, 0 }, \
+ { "/usr/include/bsd", 0, 0, 0 }, \
+ { 0, 0, 0, 0 } \
}
#endif /* CROSS_COMPILE */
@@ -152,11 +169,6 @@ Boston, MA 02111-1307, USA. */
%{p:%e-p profiling is no longer supported. Use -pg instead.} \
%{!p:-lposixcrt0.o}}}"
-/* Why not? */
-
-#undef DOLLARS_IN_IDENTIFIERS
-#define DOLLARS_IN_IDENTIFIERS 2
-
/* Allow #sccs (but don't do anything). */
#define SCCS_DIRECTIVE
@@ -240,7 +252,7 @@ Boston, MA 02111-1307, USA. */
/* How to parse #pragma's */
#undef HANDLE_PRAGMA
-#define HANDLE_PRAGMA(finput) handle_pragma (finput, &get_directive_line)
+#define HANDLE_PRAGMA(FINPUT, NODE) handle_pragma (FINPUT, NODE)
/* Give methods pretty symbol names on NeXT. */
@@ -254,6 +266,11 @@ Boston, MA 02111-1307, USA. */
(CLASS_NAME), (SEL_NAME)); \
} while (0)
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
+
/* Wrap new method names in quotes so the assembler doesn't gag.
Make Objective-C internal symbols local. */
@@ -263,7 +280,7 @@ Boston, MA 02111-1307, USA. */
else if (!strncmp (NAME, "_OBJC_", 6)) fprintf (FILE, "L%s", NAME); \
else if (!strncmp (NAME, ".objc_class_name_", 17)) \
fprintf (FILE, "%s", NAME); \
- else fprintf (FILE, "_%s", NAME); } while (0)
+ else fprintf (FILE, "%s%s", USER_LABEL_PREFIX, NAME); } while (0)
#undef ALIGN_ASM_OP
#define ALIGN_ASM_OP ".align"
@@ -565,3 +582,9 @@ objc_section_init () \
const_section (); \
} \
while (0)
+
+#ifdef ASM_COMMENT_START
+# undef ASM_COMMENT_START
+#endif
+
+#define ASM_COMMENT_START ";#"
diff --git a/contrib/gcc/config/openbsd.h b/contrib/gcc/config/openbsd.h
new file mode 100644
index 0000000..af8358e
--- /dev/null
+++ b/contrib/gcc/config/openbsd.h
@@ -0,0 +1,302 @@
+/* Base configuration file for all OpenBSD targets.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Common OpenBSD configuration.
+ All OpenBSD architectures include this file, which is intended as
+ a repository for common defines.
+
+   Some defines are common to all architectures; a few of them are
+   triggered by OBSD_* guards, so that we won't override architecture
+   defaults by mistake.
+
+ OBSD_HAS_CORRECT_SPECS:
+ another mechanism provides correct specs already.
+ OBSD_NO_DYNAMIC_LIBRARIES:
+ no implementation of dynamic libraries.
+ OBSD_OLD_GAS:
+ older flavor of gas which needs help for PIC.
+ OBSD_HAS_DECLARE_FUNCTION_NAME, OBSD_HAS_DECLARE_FUNCTION_SIZE,
+ OBSD_HAS_DECLARE_OBJECT:
+ PIC support, FUNCTION_NAME/FUNCTION_SIZE are independent, whereas
+ the corresponding logic for OBJECTS is necessarily coupled.
+
+ There are also a few `default' defines such as ASM_WEAKEN_LABEL,
+   intended as common ground for architectures that don't provide
+ anything suitable. */
+
+/* OPENBSD_NATIVE is defined only when gcc is configured as part of
+ the OpenBSD source tree, specifically through Makefile.bsd-wrapper.
+
+ In such a case the include path can be trimmed as there is no
+ distinction between system includes and gcc includes. */
+
+/* This configuration method, namely Makefile.bsd-wrapper and
+   OPENBSD_NATIVE, is NOT recommended for building cross-compilers. */
+
+#ifdef OPENBSD_NATIVE
+
+#undef GCC_INCLUDE_DIR
+#define GCC_INCLUDE_DIR "/usr/include"
+
+/* The compiler is configured with ONLY the gcc/g++ standard headers. */
+#undef INCLUDE_DEFAULTS
+#define INCLUDE_DEFAULTS \
+ { \
+ { GPLUSPLUS_INCLUDE_DIR, "G++", 1, 1 }, \
+ { GCC_INCLUDE_DIR, "GCC", 0, 0 }, \
+ { 0, 0, 0, 0 } \
+ }
+
+/* Under OpenBSD, the normal location of the various *crt*.o files is the
+ /usr/lib directory. */
+#define STANDARD_STARTFILE_PREFIX "/usr/lib/"
+
+#endif
+
+
+/* Controlling the compilation driver. */
+
+/* CPP_SPEC appropriate for OpenBSD. We deal with -posix and -pthread.
+   XXX the way threads are handled currently is not very satisfying,
+   since all code must be compiled with -pthread to work.
+   This two-stage define makes it easy for targets that have subspecs
+   to pick it up. */
+#define OBSD_CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_POSIX_THREADS}"
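+
+/* As a rough illustration of the spec above: compiling with -pthread adds
+   -D_POSIX_THREADS to the cpp command line, -posix adds -D_POSIX_SOURCE,
+   and with neither flag nothing extra is passed.  */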
+
+/* LIB_SPEC appropriate for OpenBSD. Select the appropriate libc,
+   depending on profiling and threads. Basically
+   -lc(_r)?(_p)?: _r is selected for threads, and _p for -p or -pg. */
+#define OBSD_LIB_SPEC "-lc%{pthread:_r}%{p:_p}%{!p:%{pg:_p}}"
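+
+/* A few sample expansions of the spec above (for a hypothetical source
+   file foo.c): plain `cc foo.c' links -lc, `cc -pthread foo.c' links
+   -lc_r, and `cc -pthread -pg foo.c' links -lc_r_p.  */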
+
+#ifndef OBSD_HAS_CORRECT_SPECS
+
+#ifndef OBSD_NO_DYNAMIC_LIBRARIES
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
+ || (CHAR) == 'R')
+#endif
+
+#undef CPP_SPEC
+#define CPP_SPEC OBSD_CPP_SPEC
+
+#ifdef OBSD_OLD_GAS
+/* ASM_SPEC appropriate for OpenBSD. For some architectures, OpenBSD
+ still uses a special flavor of gas that needs to be told when generating
+ pic code. */
+#undef ASM_SPEC
+#define ASM_SPEC "%{fpic:-k} %{fPIC:-k -K} %|"
+#else
+/* Since we use gas, stdin -> - is a good idea, but we don't want to
+ override native specs just for that. */
+#ifndef ASM_SPEC
+#define ASM_SPEC "%|"
+#endif
+#endif
+
+/* LINK_SPEC appropriate for OpenBSD. Support for GCC options
+ -static, -assert, and -nostdlib. */
+#undef LINK_SPEC
+#ifdef OBSD_NO_DYNAMIC_LIBRARIES
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{assert*}"
+#else
+#define LINK_SPEC \
+ "%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp %{R*} %{static:-Bstatic} %{assert*}"
+#endif
+
+#undef LIB_SPEC
+#define LIB_SPEC OBSD_LIB_SPEC
+#endif
+
+
+/* Runtime target specification. */
+
+/* You must redefine CPP_PREDEFINES in any arch specific file. */
+#undef CPP_PREDEFINES
+
+/* Implicit calls to library routines. */
+
+/* Use memcpy and memset instead of bcopy and bzero. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Miscellaneous parameters. */
+
+/* Tell libgcc2.c that OpenBSD targets support atexit. */
+#define HAVE_ATEXIT
+
+/* Controlling debugging info: dbx options. */
+
+/* Don't use the `xsTAG;' construct in DBX output; OpenBSD systems that
+ use DBX don't support it. */
+#define DBX_NO_XREFS
+
+
+/* Support of shared libraries, mostly imported from svr4.h through netbsd. */
+/* Two differences from svr4.h:
+ - we use . - _func instead of a local label,
+ - we put extra spaces in expressions such as
+ .type _func , @function
+ This is more readable for a human being and confuses c++filt less. */
+
+/* Assembler format: output and generation of labels. */
+
+/* Define the strings used for the .type and .size directives.
+ These strings generally do not vary from one system running OpenBSD
+ to another, but if a given system needs to use different pseudo-op
+ names for these, they may be overridden in the arch specific file. */
+
+/* The OpenBSD assembler is hacked to have .type & .size support even in
+   a.out format object files. Function sizes are supported but not activated
+ yet (look for GRACE_PERIOD_EXPIRED in gas/config/obj-aout.c). */
+
+#undef TYPE_ASM_OP
+#undef SIZE_ASM_OP
+
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. */
+#undef TYPE_OPERAND_FMT
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Provision for extra assembler code needed to declare a function's result
+   (taken from svr4; not actually needed yet). */
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries under OpenBSD. These macros also have to output the starting
+ labels for the relevant functions/objects. */
+
+#ifndef OBSD_HAS_DECLARE_FUNCTION_NAME
+/* Extra assembler code needed to declare a function properly.
+   Some assemblers may also need to have something extra said
+ about the function's return value. We allow for that here. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fputs (" , ", FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+#endif
+
+#ifndef OBSD_HAS_DECLARE_FUNCTION_SIZE
+/* Declare the size of a function. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ fprintf (FILE, "\t%s\t", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fputs (" , . - ", FILE); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } while (0)
+#endif
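+
+/* Sketch of what the two macros above emit for a hypothetical function
+   `foo' (ignoring any user label prefix):
+
+	.type	foo , @function
+   foo:
+	...
+	.size	foo , . - foo
+  */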
+
+#ifndef OBSD_HAS_DECLARE_OBJECT
+/* Extra assembler code needed to declare an object properly. */
+#undef ASM_DECLARE_OBJECT_NAME
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fputs (" , ", FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, " , %d\n", int_size_in_bytes (TREE_TYPE (DECL)));\
+ } \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set by ASM_DECLARE_OBJECT_NAME
+ when it was run for the same decl. */
+#undef ASM_FINISH_DECLARE_OBJECT
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, " , %d\n", int_size_in_bytes (TREE_TYPE (DECL)));\
+ } \
+ } while (0)
+#endif
+
+
+/* These are `generic' ways to weaken/globalize a label. We shouldn't need
+   to override a processor-specific definition; hence the #ifndef ASM_*.
+   In case overriding turns out to be needed, one can always #undef ASM_*
+   before including this file. */
+
+/* Tell the assembler that a symbol is weak. */
+/* Note: netbsd arm32 assembler needs a .globl here. An override may
+ be needed when/if we go for arm32 support. */
+#ifndef ASM_WEAKEN_LABEL
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+#endif
+
+/* Tell the assembler that a symbol is global. */
+#ifndef ASM_GLOBALIZE_LABEL
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.globl\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while(0)
+#endif
+
+
+/* Storage layout. */
+
+/* We don't have to worry about binary compatibility with older C++ code,
+ but there is a big known bug with vtable thunks which has not been
+ fixed yet, so DON'T activate it by default. */
+/* #define DEFAULT_VTABLE_THUNKS 1 */
+
+
+/* Since we advertise support for weak symbols, gthr.h would otherwise
+   erroneously try to use #pragma weak; disable that. */
+#define GTHREAD_USE_WEAK 0
+
diff --git a/contrib/gcc/config/psos.h b/contrib/gcc/config/psos.h
new file mode 100644
index 0000000..d404300
--- /dev/null
+++ b/contrib/gcc/config/psos.h
@@ -0,0 +1,183 @@
+/* Operating system specific defines to be used when targeting GCC for some
+ embedded system running pSOS. We assume GNU tools with ELF, but
+ try to maintain compatibility with the MRI tools. Based on svr4.h.
+ Copyright (C) 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+ To use this file, make up a file with a name like:
+
+ ?????-psos.h
+
+ where ????? is replaced by the name of the basic hardware that you
+ are targeting for. Then, in the file ?????-psos.h, put something
+ like:
+
+ #include "?????.h"
+ #include "psos.h"
+
+ followed by any really system-specific defines (or overrides of
+ defines) which you find that you need.
+*/
+
+
+/* Define a symbol indicating that we are using psos.h. */
+
+#define USING_PSOS_H
+
+
+/* All pSOS targets currently use the ELF object file format. */
+
+#define OBJECT_FORMAT_ELF
+
+
+/* Provide a NULL STARTFILE_SPEC. The startfile cannot be specified
+ here because it depends on the architecture (e.g. 68K), the
+ board-support package (e.g. M162) and the run-time configuration
+ (e.g. application vs. ram-image vs. rom-image). Specify the
+ startfile in a linker-script created from the generic
+ architecture-specific linker-scripts. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+
+/* Predefined macros (independent of processor type). */
+
+#define CPP_PREDEFINES "-Dpsos"
+
+
+/* Implicit library calls should use ANSI memcpy rather than BSD
+ bcopy, etc. */
+
+#define TARGET_MEM_FUNCTIONS
+
+
+/* When using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+
+#define ASM_IDENTIFY_GCC(FILE) \
+do \
+ { \
+ fputs (".stabs \"gcc2_compiled.\", 0x3c, 0, 0, 0\n", FILE); \
+ } \
+while (0)
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* Switch into a generic section. */
+
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+ fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
+ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
+ (DECL) && DECL_READONLY_SECTION (DECL, RELOC) ? "a" : "aw")
+
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors
+ sections. */
+
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_ctors, in_dtors
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+extern void text_section ();
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+
+#ifndef INT_ASM_OP
+#define INT_ASM_OP ".long"
+#endif
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+
+/* Use DBX debugging info by default. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+#endif
+
+/* For pSOS we use DBX debugging info. */
+
+#define DBX_DEBUGGING_INFO
+
+
+/* Prevent generation of an exit function. */
+
+#define HAVE_ATEXIT
+
diff --git a/contrib/gcc/config/ptx4.h b/contrib/gcc/config/ptx4.h
new file mode 100644
index 0000000..aa31924
--- /dev/null
+++ b/contrib/gcc/config/ptx4.h
@@ -0,0 +1,859 @@
+/* Operating system specific defines to be used when targeting GCC for some
+ generic System V Release 4 system.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+ Renamed and changed to suit Dynix/ptx v4 and later.
+ Modified by Tim Wright (timw@sequent.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA.
+
+*/
+
+/* Define a symbol indicating that we are using svr4.h. */
+#define USING_SVR4_H
+
+/* For the sake of libgcc2.c, indicate target supports atexit. */
+#define HAVE_ATEXIT
+
+/* Cpp, assembler, linker, library, and startfile spec's. */
+
+/* This defines which switch letters take arguments. On svr4, most of
+ the normal cases (defined in gcc.c) apply, and we also have -h* and
+ -z* options (for the linker). Note however that there is no such
+ thing as a -T option for svr4. */
+
+#define SWITCH_TAKES_ARG(CHAR) \
+ ( (CHAR) == 'D' \
+ || (CHAR) == 'U' \
+ || (CHAR) == 'o' \
+ || (CHAR) == 'e' \
+ || (CHAR) == 'u' \
+ || (CHAR) == 'I' \
+ || (CHAR) == 'm' \
+ || (CHAR) == 'L' \
+ || (CHAR) == 'A' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z')
+
+/* This defines which multi-letter switches take arguments. On svr4,
+ there are no such switches except those implemented by GCC itself. */
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ && strcmp (STR, "Tdata") && strcmp (STR, "Ttext") \
+ && strcmp (STR, "Tbss"))
+
+/* You should redefine CPP_PREDEFINES in any file which includes this one.
+ The definition should be appropriate for the type of target system
+ involved, and it should include any -A (assertion) options which are
+ appropriate for the given target system. */
+#undef CPP_PREDEFINES
+
+/* Provide an ASM_SPEC appropriate for svr4. Here we try to support as
+ many of the specialized svr4 assembler options as seems reasonable,
+ given that there are certain options which we can't (or shouldn't)
+ support directly due to the fact that they conflict with other options
+ for other svr4 tools (e.g. ld) or with other options for GCC itself.
+ For example, we don't support the -o (output file) or -R (remove
+ input file) options because GCC already handles these things. We
+ also don't support the -m (run m4) option for the assembler because
+ that conflicts with the -m (produce load map) option of the svr4
+ linker. We do however allow passing arbitrary options to the svr4
+ assembler via the -Wa, option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -Ym,* or -Yd,*
+ option.
+*/
+
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "-no_0f_fix %{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+
+/* svr4 assemblers need the `-' (indicating input from stdin) to come after
+ the -o option (and its argument) for some reason. If we try to put it
+ before the -o option, the assembler will try to read the file named as
+ the output file in the -o option as an input file (after it has already
+ written some stuff to it) and the binary stuff contained therein will
+   totally confuse the assembler, resulting in many spurious error
+ messages. */
+
+#undef ASM_FINAL_SPEC
+#define ASM_FINAL_SPEC "%{pipe:-}"
+
+/* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default
+ standard C library (unless we are building a shared library). */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
+
+/* Provide a LIBGCC_SPEC appropriate for svr4. We also want to exclude
+ libgcc when -symbolic. */
+
+#undef LIBGCC_SPEC
+#define LIBGCC_SPEC "%{!shared:%{!symbolic:-lgcc}}"
+
+/* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own
+ magical crtend.o file (see crtstuff.c) which provides part of the
+   support for getting C++ file-scope static objects constructed before
+ entering `main', followed by the normal svr3/svr4 "finalizer" file,
+ which is either `gcrtn.o' or `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o}%{!pg:crtn.o%s}"
+
+/* Provide a LINK_SPEC appropriate for svr4. Here we provide support
+ for the special GCC options -static, -shared, and -symbolic which
+ allow us to link things in one of these three modes by applying the
+ appropriate combinations of options at link-time. We also provide
+ support here for as many of the other svr4 linker options as seems
+ reasonable, given that some of them conflict with options for other
+ svr4 tools (e.g. the assembler). In particular, we do support the
+ -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*,
+ -l*, -o*, -r, -s, -u*, and -L* options are directly supported
+ by gcc.c itself. We don't directly support the -m (generate load
+ map) option because that conflicts with the -m (run m4) option of
+ the svr4 assembler. We also don't directly support the svr4 linker's
+ -I* or -M* options because these conflict with existing GCC options.
+ We do however allow passing arbitrary options to the svr4 linker
+ via the -Wl, option. We don't support the svr4 linker's -a option
+ at all because it is totally useless and because it conflicts with
+ GCC's own -a option.
+
+ Note that gcc doesn't allow a space to follow -Y in a -YP,* option.
+
+ When the -G link option is used (-shared and -symbolic) a final link is
+ not being done. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{b} %{Wl,*:%*} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{!YP,*:%{p:-Y P,/lib/libp:/usr/lib/libp:/lib:/usr/lib} \
+ %{!p:-Y P,/lib:/usr/lib}} \
+ %{Qy:} %{!Qn:-Qy}"
+
+/* Gcc automatically adds in one of the files /lib/values-Xc.o,
+ /lib/values-Xa.o, or /lib/values-Xt.o for each final link
+ step (depending upon the other gcc options selected, such as
+ -traditional and -ansi). These files each contain one (initialized)
+ copy of a special variable called `_lib_version'. Each one of these
+ files has `_lib_version' initialized to a different (enum) value.
+   The SVR4 library routines query the value of `_lib_version' at run time
+ to decide how they should behave. Specifically, they decide (based
+ upon the value of `_lib_version') if they will act in a strictly ANSI
+ conforming manner or not.
+*/
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}}}\
+ %{pg:gcrti.o%s}%{!pg:crti.o%s} \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}} \
+ crtbegin.o%s"
+
+/* Attach a special .ident directive to the end of the file to identify
+ the version of GCC which compiled this code. The format of the
+ .ident string is patterned after the ones produced by native svr4
+ C compilers. */
+
+#define IDENT_ASM_OP ".ident"
+
+#define ASM_FILE_END(FILE) \
+do { \
+ fprintf ((FILE), "\t%s\t\"GCC: (GNU) %s\"\n", \
+ IDENT_ASM_OP, version_string); \
+ } while (0)
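+
+/* The net effect is that every assembly file ends with something like
+	.ident	"GCC: (GNU) egcs-2.91.66"
+   where the version string is whatever this compiler identifies itself as.  */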
+
+/* Allow #sccs in preprocessor. */
+
+#define SCCS_DIRECTIVE
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+/* Use periods rather than dollar signs in special g++ assembler names. */
+
+#define NO_DOLLAR_IN_LABEL
+
+/* Writing `int' for a bitfield forces int alignment for the structure. */
+
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Implicit library calls should use memcpy, not bcopy, etc. */
+
+#define TARGET_MEM_FUNCTIONS
+
+/* Handle #pragma weak and #pragma pack. */
+
+#define HANDLE_SYSV_PRAGMA
+
+/* System V Release 4 uses DWARF debugging info. */
+
+#define DWARF_DEBUGGING_INFO
+
+/* The numbers used to denote specific machine registers in the System V
+ Release 4 DWARF debugging information are quite likely to be totally
+ different from the numbers used in BSD stabs debugging information
+ for the same kind of target machine. Thus, we undefine the macro
+ DBX_REGISTER_NUMBER here as an extra inducement to get people to
+ provide proper machine-specific definitions of DBX_REGISTER_NUMBER
+ (which is also used to provide DWARF registers numbers in dwarfout.c)
+ in their tm.h files which include this file. */
+
+#undef DBX_REGISTER_NUMBER
+
+/* gas on SVR4 supports the use of .stabs. Permit -gstabs to be used
+ in general, although it will only work when using gas. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Use DWARF debugging info by default. */
+
+#ifndef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF_DEBUG
+#endif
+
+/* Make LBRAC and RBRAC addresses relative to the start of the
+ function. The native Solaris stabs debugging format works this
+ way, gdb expects it, and it reduces the number of relocation
+ entries. */
+
+#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+
+/* When using stabs, gcc2_compiled must be a stabs entry, not an
+ ordinary symbol, or gdb won't see it. The stabs entry must be
+ before the N_SO in order for gdb to find it. */
+
+#define ASM_IDENTIFY_GCC(FILE) \
+do \
+ { \
+ if (write_symbols != DBX_DEBUG) \
+ fputs ("gcc2_compiled.:\n", FILE); \
+ else \
+ fputs ("\t.stabs\t\"gcc2_compiled.\", 0x3c, 0, 0, 0\n", FILE); \
+ } \
+while (0)
+
+/* Like block addresses, stabs line numbers are relative to the
+ current function. */
+
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+do \
+ { \
+ static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,.LM%d-", \
+ line, sym_lineno); \
+ assemble_name (file, \
+ XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));\
+ fprintf (file, "\n.LM%d:\n", sym_lineno); \
+ sym_lineno += 1; \
+ } \
+while (0)
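+
+/* For, say, line 42 of a hypothetical function `foo', the macro above
+   emits roughly
+
+   .stabn 68,0,42,.LM1-foo
+   .LM1:
+
+   i.e. the N_SLINE (68) address is given relative to the start of the
+   current function.  */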
+
+/* In order for relative line numbers to work, we must output the
+ stabs entry for the function name first. */
+
+#define DBX_FUNCTION_FIRST
+
+/* Generate a blank trailing N_SO to mark the end of the .o file, since
+ we can't depend upon the linker to mark .o file boundaries with
+ embedded stabs. */
+
+#define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \
+ fprintf (FILE, \
+ "\t.text\n\t.stabs \"\",%d,0,0,.Letext\n.Letext:\n", N_SO)
+
+/* Define the actual types of some ANSI-mandated types. (These
+ definitions should work for most SVR4 systems). */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* This causes trouble, because it requires the host machine
+ to support ANSI C. */
+/* #define MULTIBYTE_CHARS */
+
+#undef ASM_BYTE_OP
+#define ASM_BYTE_OP ".byte"
+
+#undef SET_ASM_OP
+#define SET_ASM_OP ".set"
+
+/* This is how to begin an assembly language file. Most svr4 assemblers want
+ at least a .file directive to come first, and some want to see a .version
+ directive come right after that. Here we just establish a default
+ which generates only the .file directive. If you need a .version
+ directive for any specific target, you should override this definition
+ in the target-specific file which includes this one. */
+
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ output_file_directive ((FILE), main_input_filename)
+
+/* This is how to allocate empty space in some section. The .zero
+ pseudo-op is used for this on most svr4 assemblers. */
+
+#define SKIP_ASM_OP ".zero"
+
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t%s\t%u\n", SKIP_ASM_OP, (SIZE))
+
+/* The prefix to add to user-visible assembler symbols.
+
+ For System V Release 4 the convention is *not* to prepend a leading
+ underscore onto user-level symbol names. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+
+ For most svr4 systems, the convention is that any symbol which begins
+ with a period is not put into the linker symbol table by the assembler. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE, PREFIX, NUM) \
+do { \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM); \
+} while (0)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'.
+
+ For most svr4 systems, the convention is that any symbol which begins
+ with a period is not put into the linker symbol table by the assembler. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
+do { \
+ sprintf (LABEL, "*.%s%d", PREFIX, NUM); \
+} while (0)
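+
+/* E.g. ASM_OUTPUT_INTERNAL_LABEL (FILE, "LC", 5) prints `.LC5:', and
+   ASM_GENERATE_INTERNAL_LABEL fills LABEL with "*.LC5"; the leading `*'
+   tells assemble_name to output the rest verbatim, with no user label
+   prefix applied.  */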
+
+/* Output the label which precedes a jumptable. Note that for all svr4
+   systems where we actually generate jumptables (which is to say every
+   svr4 target except i386, where we use casesi instead) we put the
+   jumptables into the .rodata section and since other stuff could have
+   been put into the .rodata section prior to any given jumptable, we
+   have to make sure that the location counter for the .rodata section
+   gets properly re-aligned prior to the actual beginning of the jump
+   table. */
+
+#define ALIGN_ASM_OP ".align"
+
+#ifndef ASM_OUTPUT_BEFORE_CASE_LABEL
+#define ASM_OUTPUT_BEFORE_CASE_LABEL(FILE,PREFIX,NUM,TABLE) \
+ ASM_OUTPUT_ALIGN ((FILE), 2);
+#endif
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE,PREFIX,NUM,JUMPTABLE) \
+ do { \
+ ASM_OUTPUT_BEFORE_CASE_LABEL (FILE, PREFIX, NUM, JUMPTABLE) \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, PREFIX, NUM); \
+ } while (0)
+
+/* The standard SVR4 assembler seems to require that certain builtin
+ library routines (e.g. .udiv) be explicitly declared as .globl
+ in each assembly file where they are referenced. */
+
+#define ASM_OUTPUT_EXTERNAL_LIBCALL(FILE, FUN) \
+ ASM_GLOBALIZE_LABEL (FILE, XSTR (FUN, 0))
+
+/* This says how to output assembler code to declare an
+ uninitialized external linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define COMMON_ASM_OP ".comm"
+
+#undef ASM_OUTPUT_ALIGNED_COMMON
+#define ASM_OUTPUT_ALIGNED_COMMON(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "\t%s\t", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (SIZE), (ALIGN) / BITS_PER_UNIT); \
+} while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object. Under SVR4,
+ the linker seems to want the alignment of data objects
+ to depend on their types. We do exactly that here. */
+
+#define LOCAL_ASM_OP ".local"
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fprintf ((FILE), "\t%s\t", LOCAL_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), "\n"); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
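+
+/* For a hypothetical uninitialized object `buf' of 64 bytes with 32-bit
+   alignment, the two macros above emit
+
+	.comm	buf,64,4
+   for external (common) linkage, and
+	.local	buf
+	.comm	buf,64,4
+   for internal (local) linkage; ALIGN arrives in bits, hence the
+   division by BITS_PER_UNIT.  */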
+
+/* This is the pseudo-op used to generate a 32-bit word of data with a
+ specific value in some section. This is the same for all known svr4
+ assemblers. */
+
+#define INT_ASM_OP ".long"
+
+/* This is the pseudo-op used to generate a contiguous sequence of byte
+ values from a double-quoted string WITHOUT HAVING A TERMINATING NUL
+ AUTOMATICALLY APPENDED. This is the same for most svr4 assemblers. */
+
+#undef ASCII_DATA_ASM_OP
+#define ASCII_DATA_ASM_OP ".ascii"
+
+/* Support const sections and the ctors and dtors sections for g++.
+   Note that there appear to be two different ways to support const
+ sections at the moment. You can either #define the symbol
+ READONLY_DATA_SECTION (giving it some code which switches to the
+ readonly data section) or else you can #define the symbols
+ EXTRA_SECTIONS, EXTRA_SECTION_FUNCTIONS, SELECT_SECTION, and
+ SELECT_RTX_SECTION. We do both here just to be on the safe side. */
+
+#define USE_CONST_SECTION 1
+
+#define CONST_SECTION_ASM_OP ".section\t.rodata"
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"aw\""
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"aw\""
+
+/* On svr4, we *do* have support for the .init and .fini sections, and we
+ can put stuff in there to be executed before and after `main'. We let
+ crtstuff.c and other files know this by defining the following symbols.
+ The definitions say how to change sections to the .init and .fini
+ sections. This is the same for all known svr4 assemblers. */
+
+#define INIT_SECTION_ASM_OP ".section\t.init"
+#define FINI_SECTION_ASM_OP ".section\t.fini"
+
+/* A default list of other sections which we might be "in" at any given
+ time. For targets that use additional sections (e.g. .tdesc) you
+ should override this definition in the target-specific file which
+ includes this file. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors
+
+/* A default list of extra section function definitions. For targets
+ that use additional sections (e.g. .tdesc) you should override this
+ definition in the target-specific file which includes this file. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#define READONLY_DATA_SECTION() const_section ()
+
+extern void text_section ();
+
+#define CONST_SECTION_FUNCTION \
+void \
+const_section () \
+{ \
+ if (!USE_CONST_SECTION) \
+ text_section(); \
+ else if (in_section != in_const) \
+ { \
+ fprintf (asm_out_file, "%s\n", CONST_SECTION_ASM_OP); \
+ in_section = in_const; \
+ } \
+}
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+/* Switch into a generic section.
+ This is currently only used to support section attributes.
+
+ We make the section read-only and executable for a function decl,
+ read-only for a const data decl, and writable for a non-const data decl. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+ fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
+ (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
+ (DECL) && DECL_READONLY_SECTION (DECL, RELOC) ? "a" : "aw")
+
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
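+
+/* For a hypothetical constructor record `__frame_dummy', the macro above
+   switches to .ctors (emitting the .section directive only when the
+   current section actually changes) and then outputs
+
+	.long	 __frame_dummy
+  */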
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement or statements to switch to the appropriate
+ section for output of DECL. DECL is either a `VAR_DECL' node
+ or a constant of some sort. RELOC indicates whether forming
+ the initial value of DECL requires link-time relocations. */
+
+#define SELECT_SECTION(DECL,RELOC) \
+{ \
+ if (TREE_CODE (DECL) == STRING_CST) \
+ { \
+ if (! flag_writable_strings) \
+ const_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (DECL) == VAR_DECL) \
+ { \
+ if ((flag_pic && RELOC) \
+ || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
+ || !DECL_INITIAL (DECL) \
+ || (DECL_INITIAL (DECL) != error_mark_node \
+ && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ data_section (); \
+ else \
+ const_section (); \
+ } \
+ else \
+ const_section (); \
+}
+
+/* A C statement or statements to switch to the appropriate
+ section for output of RTX in mode MODE. RTX is some kind
+ of constant in RTL. The argument MODE is redundant except
+ in the case of a `const_int' rtx. Currently, these always
+ go into the const section. */
+
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) const_section()
+
+/* Define the strings used for the special svr4 .type and .size directives.
+ These strings generally do not vary from one system running svr4 to
+   another, but if a given system (e.g. m88k running svr4) needs to use
+ different pseudo-op names for these, they may be overridden in the
+ file which includes this one. */
+
+#define TYPE_ASM_OP ".type"
+#define SIZE_ASM_OP ".size"
+
+/* This is how we tell the assembler that a symbol is weak. */
+
+#define ASM_WEAKEN_LABEL(FILE,NAME) \
+ do { fputs ("\t.weak\t", FILE); assemble_name (FILE, NAME); \
+ fputc ('\n', FILE); } while (0)
+
+/* The following macro defines the format used to output the second
+ operand of the .type assembler directive. Different svr4 assemblers
+ expect various different forms for this operand. The one given here
+ is just a default. You may need to override it in your machine-
+ specific tm.h file (depending upon the particulars of your assembler). */
+
+#define TYPE_OPERAND_FMT "@%s"
+
+/* Write the extra assembler code needed to declare a function's result.
+ Most svr4 assemblers don't require any special declaration of the
+ result value, but there are exceptions. */
+
+#ifndef ASM_DECLARE_RESULT
+#define ASM_DECLARE_RESULT(FILE, RESULT)
+#endif
+
+/* These macros generate the special .type and .size directives which
+ are used to set the corresponding fields of the linker symbol table
+ entries in an ELF object file under SVR4. These macros also output
+ the starting labels for the relevant functions/objects. */
+
+/* Write the extra assembler code needed to declare a function properly.
+ Some svr4 assemblers need to also have something extra said about the
+ function's return value. We allow for that here. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "function"); \
+ putc ('\n', FILE); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Write the extra assembler code needed to declare an object properly. */
+
+#define ASM_DECLARE_OBJECT_NAME(FILE, NAME, DECL) \
+ do { \
+ fprintf (FILE, "\t%s\t ", TYPE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ putc (',', FILE); \
+ fprintf (FILE, TYPE_OPERAND_FMT, "object"); \
+ putc ('\n', FILE); \
+ size_directive_output = 0; \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL)) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ } while (0)
+
+/* Output the size directive for a decl in rest_of_decl_compilation
+ in the case where we did not do so before the initializer.
+ Once we find the error_mark_node, we know that the value of
+ size_directive_output was set
+ by ASM_DECLARE_OBJECT_NAME when it was run for the same decl. */
+
+#define ASM_FINISH_DECLARE_OBJECT(FILE, DECL, TOP_LEVEL, AT_END) \
+do { \
+ char *name = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \
+ if (!flag_inhibit_size_directive && DECL_SIZE (DECL) \
+ && ! AT_END && TOP_LEVEL \
+ && DECL_INITIAL (DECL) == error_mark_node \
+ && !size_directive_output) \
+ { \
+ size_directive_output = 1; \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, name); \
+ fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ } \
+ } while (0)
+
+/* This is how to declare the size of a function. */
+
+#define ASM_DECLARE_FUNCTION_SIZE(FILE, FNAME, DECL) \
+ do { \
+ if (!flag_inhibit_size_directive) \
+ { \
+ char label[256]; \
+ static int labelno; \
+ labelno++; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "Lfe", labelno); \
+ ASM_OUTPUT_INTERNAL_LABEL (FILE, "Lfe", labelno); \
+ fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
+ assemble_name (FILE, (FNAME)); \
+ fprintf (FILE, ","); \
+ assemble_name (FILE, label); \
+ fprintf (FILE, "-"); \
+ assemble_name (FILE, (FNAME)); \
+ putc ('\n', FILE); \
+ } \
+ } while (0)
+
+/* A table of byte codes used by the ASM_OUTPUT_ASCII and
+ ASM_OUTPUT_LIMITED_STRING macros. Each byte in the table
+ corresponds to a particular byte value [0..255]. For any
+ given byte value, if the value in the corresponding table
+ position is zero, the given character can be output directly.
+ If the table value is 1, the byte must be output as a \ooo
+   octal escape. If the table value is anything else, then the
+ byte value should be output as a \ followed by the value
+ in the table. Note that we can use standard UN*X escape
+ sequences for many control characters, but we don't use
+ \a to represent BEL because some svr4 assemblers (e.g. on
+ the i386) don't know about that. Also, we don't use \v
+   since some versions of gas, such as 2.2, did not accept it. */
+
+#define ESCAPES \
+"\1\1\1\1\1\1\1\1btn\1fr\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\0\0\"\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\\\0\0\0\
+\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\
+\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1"
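+
+/* Reading the table: ESCAPES['A'] is 0, so `A' is output as itself;
+   ESCAPES['\b'] is `b', so a backspace is output as \b; ESCAPES[1] is 1,
+   so control-A is output as the octal escape \001; `"' and `\\' map to
+   themselves, so they come out as \" and \\ respectively.  */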
+
+/* Some svr4 assemblers have a limit on the number of characters which
+ can appear in the operand of a .string directive. If your assembler
+ has such a limitation, you should define STRING_LIMIT to reflect that
+ limit. Note that at least some svr4 assemblers have a limit on the
+ actual number of bytes in the double-quoted string, and that they
+ count each character in an escape sequence as one byte. Thus, an
+ escape sequence like \377 would count as four bytes.
+
+ If your target assembler doesn't support the .string directive, you
+ should define this to zero.
+*/
+
+#define STRING_LIMIT ((unsigned) 256)
+
+#define STRING_ASM_OP ".string"
+
+/* The routine used to output NUL terminated strings. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable, especially for targets like the i386
+ (where the only alternative is to output character sequences as
+ comma separated lists of numbers). */
+
+#define ASM_OUTPUT_LIMITED_STRING(FILE, STR) \
+ do \
+ { \
+ register unsigned char *_limited_str = (unsigned char *) (STR); \
+ register unsigned ch; \
+ fprintf ((FILE), "\t%s\t\"", STRING_ASM_OP); \
+ for (; ch = *_limited_str; _limited_str++) \
+ { \
+ register int escape; \
+ switch (escape = ESCAPES[ch]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ break; \
+ } \
+ } \
+ fprintf ((FILE), "\"\n"); \
+ } \
+ while (0)
+
+/* The routine used to output sequences of byte values. We use a special
+ version of this for most svr4 targets because doing so makes the
+ generated assembly code more compact (and thus faster to assemble)
+ as well as more readable. Note that if we find subparts of the
+ character sequence which end with NUL (and which are shorter than
+ STRING_LIMIT) we output those using ASM_OUTPUT_LIMITED_STRING. */
+
+#undef ASM_OUTPUT_ASCII
+#define ASM_OUTPUT_ASCII(FILE, STR, LENGTH) \
+ do \
+ { \
+ register unsigned char *_ascii_bytes = (unsigned char *) (STR); \
+ register unsigned char *limit = _ascii_bytes + (LENGTH); \
+ register unsigned bytes_in_chunk = 0; \
+ for (; _ascii_bytes < limit; _ascii_bytes++) \
+ { \
+ register unsigned char *p; \
+ if (bytes_in_chunk >= 60) \
+ { \
+ fprintf ((FILE), "\"\n"); \
+ bytes_in_chunk = 0; \
+ } \
+ for (p = _ascii_bytes; p < limit && *p != '\0'; p++) \
+ continue; \
+ if (p < limit && (p - _ascii_bytes) <= STRING_LIMIT) \
+ { \
+ if (bytes_in_chunk > 0) \
+ { \
+ fprintf ((FILE), "\"\n"); \
+ bytes_in_chunk = 0; \
+ } \
+ ASM_OUTPUT_LIMITED_STRING ((FILE), _ascii_bytes); \
+ _ascii_bytes = p; \
+ } \
+ else \
+ { \
+ register int escape; \
+ register unsigned ch; \
+ if (bytes_in_chunk == 0) \
+ fprintf ((FILE), "\t%s\t\"", ASCII_DATA_ASM_OP); \
+ switch (escape = ESCAPES[ch = *_ascii_bytes]) \
+ { \
+ case 0: \
+ putc (ch, (FILE)); \
+ bytes_in_chunk++; \
+ break; \
+ case 1: \
+ fprintf ((FILE), "\\%03o", ch); \
+ bytes_in_chunk += 4; \
+ break; \
+ default: \
+ putc ('\\', (FILE)); \
+ putc (escape, (FILE)); \
+ bytes_in_chunk += 2; \
+ break; \
+ } \
+ } \
+ } \
+ if (bytes_in_chunk > 0) \
+ fprintf ((FILE), "\"\n"); \
+ } \
+ while (0)
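+
+/* As a small worked example, the five bytes { 'h', 'i', 0, 1, 2 } would
+   come out as
+
+	.string	"hi"
+	.ascii	"\001\002"
+
+   the NUL-terminated prefix goes through ASM_OUTPUT_LIMITED_STRING (whose
+   .string directive supplies the trailing NUL), the rest through .ascii.  */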
+
+/* All SVR4 targets use the ELF object file format. */
+#define OBJECT_FORMAT_ELF
diff --git a/contrib/gcc/config/sparc/aout.h b/contrib/gcc/config/sparc/aout.h
new file mode 100644
index 0000000..478d710
--- /dev/null
+++ b/contrib/gcc/config/sparc/aout.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for SPARC using a.out.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h" /* SPARC definitions */
+#include "aoutos.h" /* A.out definitions */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
diff --git a/contrib/gcc/config/sparc/bsd.h b/contrib/gcc/config/sparc/bsd.h
new file mode 100644
index 0000000..761abe2
--- /dev/null
+++ b/contrib/gcc/config/sparc/bsd.h
@@ -0,0 +1,7 @@
+#include "sparc/sparc.h"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s}%{!pg:%{p:gcrt0.o%s}%{!p:crt0.o%s}}"
diff --git a/contrib/gcc/config/sparc/elf.h b/contrib/gcc/config/sparc/elf.h
new file mode 100644
index 0000000..70cb26a
--- /dev/null
+++ b/contrib/gcc/config/sparc/elf.h
@@ -0,0 +1,42 @@
+/* Definitions of target machine for GNU compiler,
+ for SPARC running in an embedded environment using the ELF file format.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sol2.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__elf__ -Acpu(sparc) -Amachine(sparc)"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* Use the default. */
+#undef LINK_SPEC
+
+/* Don't set the target flags, this is done by the linker script */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* FIXME: until fixed */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
diff --git a/contrib/gcc/config/sparc/gmon-sol2.c b/contrib/gcc/config/sparc/gmon-sol2.c
new file mode 100644
index 0000000..2a5b898
--- /dev/null
+++ b/contrib/gcc/config/sparc/gmon-sol2.c
@@ -0,0 +1,429 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Mangled into a form that works on Sparc Solaris 2 by Mark Eichin
+ * for Cygnus Support, July 1992.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)gmon.c 5.3 (Berkeley) 5/22/91";
+#endif /* not lint */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#if 0
+#include "sparc/gmon.h"
+#else
+struct phdr {
+ char *lpc;
+ char *hpc;
+ int ncnt;
+};
+#define HISTFRACTION 2
+#define HISTCOUNTER unsigned short
+#define HASHFRACTION 1
+#define ARCDENSITY 2
+#define MINARCS 50
+struct tostruct {
+ char *selfpc;
+ long count;
+ unsigned short link;
+};
+struct rawarc {
+ unsigned long raw_frompc;
+ unsigned long raw_selfpc;
+ long raw_count;
+};
+#define ROUNDDOWN(x,y) (((x)/(y))*(y))
+#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
+
+#endif
+
+/* extern mcount() asm ("mcount"); */
+/*extern*/ char *minbrk /* asm ("minbrk") */;
+
+ /*
+ * froms is actually a bunch of unsigned shorts indexing tos
+ */
+static int profiling = 3;
+static unsigned short *froms;
+static struct tostruct *tos = 0;
+static long tolimit = 0;
+static char *s_lowpc = 0;
+static char *s_highpc = 0;
+static unsigned long s_textsize = 0;
+
+static int ssiz;
+static char *sbuf;
+static int s_scale;
+ /* see profil(2) where this is described (incorrectly) */
+#define SCALE_1_TO_1 0x10000L
+
+#define MSG "No space for profiling buffer(s)\n"
+
+static void moncontrol();
+
+void monstartup(lowpc, highpc)
+ char *lowpc;
+ char *highpc;
+{
+ int monsize;
+ char *buffer;
+ register int o;
+
+ /*
+ * round lowpc and highpc to multiples of the density we're using
+ * so the rest of the scaling (here and in gprof) stays in ints.
+ */
+ lowpc = (char *)
+ ROUNDDOWN((unsigned)lowpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_lowpc = lowpc;
+ highpc = (char *)
+ ROUNDUP((unsigned)highpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_highpc = highpc;
+ s_textsize = highpc - lowpc;
+ monsize = (s_textsize / HISTFRACTION) + sizeof(struct phdr);
+ buffer = sbrk( monsize );
+ if ( buffer == (char *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ return;
+ }
+ froms = (unsigned short *) sbrk( s_textsize / HASHFRACTION );
+ if ( froms == (unsigned short *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ return;
+ }
+ tolimit = s_textsize * ARCDENSITY / 100;
+ if ( tolimit < MINARCS ) {
+ tolimit = MINARCS;
+ } else if ( tolimit > 65534 ) {
+ tolimit = 65534;
+ }
+ tos = (struct tostruct *) sbrk( tolimit * sizeof( struct tostruct ) );
+ if ( tos == (struct tostruct *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ tos = 0;
+ return;
+ }
+ minbrk = sbrk(0);
+ tos[0].link = 0;
+ sbuf = buffer;
+ ssiz = monsize;
+ ( (struct phdr *) buffer ) -> lpc = lowpc;
+ ( (struct phdr *) buffer ) -> hpc = highpc;
+ ( (struct phdr *) buffer ) -> ncnt = ssiz;
+ monsize -= sizeof(struct phdr);
+ if ( monsize <= 0 )
+ return;
+ o = highpc - lowpc;
+ if( monsize < o )
+#ifndef hp300
+ s_scale = ( (float) monsize / o ) * SCALE_1_TO_1;
+#else /* avoid floating point */
+ {
+ int quot = o / monsize;
+
+ if (quot >= 0x10000)
+ s_scale = 1;
+ else if (quot >= 0x100)
+ s_scale = 0x10000 / quot;
+ else if (o >= 0x800000)
+ s_scale = 0x1000000 / (o / (monsize >> 8));
+ else
+ s_scale = 0x1000000 / ((o << 8) / monsize);
+ }
+#endif
+ else
+ s_scale = SCALE_1_TO_1;
+ moncontrol(1);
+}
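+
+/* Rough example of the scaling above: with a text range of o = 0x40000
+   bytes and a histogram buffer of monsize = 0x10000 bytes (after the
+   header), s_scale comes out as (0x10000 / 0x40000) * 0x10000 = 0x4000,
+   i.e. a 16-bit fixed-point fraction telling profil(2) to spread the
+   text over a buffer a quarter of its size.  */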
+
+void
+_mcleanup()
+{
+ int fd;
+ int fromindex;
+ int endfrom;
+ char *frompc;
+ int toindex;
+ struct rawarc rawarc;
+ char *profdir;
+ char *proffile;
+ char *progname;
+ char buf[PATH_MAX];
+ extern char **___Argv;
+
+ moncontrol(0);
+
+ if ((profdir = getenv("PROFDIR")) != NULL) {
+ /* If PROFDIR contains a null value, no profiling output is produced */
+ if (*profdir == '\0') {
+ return;
+ }
+
+ progname=strrchr(___Argv[0], '/');
+ if (progname == NULL)
+ progname=___Argv[0];
+ else
+ progname++;
+
+ sprintf(buf, "%s/%ld.%s", profdir, getpid(), progname);
+ proffile = buf;
+ } else {
+ proffile = "gmon.out";
+ }
+
+ fd = creat( proffile, 0666 );
+ if ( fd < 0 ) {
+ perror( proffile );
+ return;
+ }
+# ifdef DEBUG
+ fprintf( stderr , "[mcleanup] sbuf 0x%x ssiz %d\n" , sbuf , ssiz );
+# endif /* DEBUG */
+ write( fd , sbuf , ssiz );
+ endfrom = s_textsize / (HASHFRACTION * sizeof(*froms));
+ for ( fromindex = 0 ; fromindex < endfrom ; fromindex++ ) {
+ if ( froms[fromindex] == 0 ) {
+ continue;
+ }
+ frompc = s_lowpc + (fromindex * HASHFRACTION * sizeof(*froms));
+ for (toindex=froms[fromindex]; toindex!=0; toindex=tos[toindex].link) {
+# ifdef DEBUG
+ fprintf( stderr ,
+ "[mcleanup] frompc 0x%x selfpc 0x%x count %d\n" ,
+ frompc , tos[toindex].selfpc , tos[toindex].count );
+# endif /* DEBUG */
+ rawarc.raw_frompc = (unsigned long) frompc;
+ rawarc.raw_selfpc = (unsigned long) tos[toindex].selfpc;
+ rawarc.raw_count = tos[toindex].count;
+ write( fd , &rawarc , sizeof rawarc );
+ }
+ }
+ close( fd );
+}
+
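+/* For illustration only (not from the original sources): _mcleanup()
+   above writes gmon.out as the histogram buffer (a struct phdr followed
+   by the counters) and then a stream of struct rawarc records.  A
+   minimal reader for that layout, assuming the same structure layouts,
+   word size and byte order as the profiled program:  */
+#if 0
+#include <stdio.h>
+
+int
+main (void)
+{
+  struct phdr hdr;
+  struct rawarc arc;
+  FILE *fp = fopen ("gmon.out", "rb");
+
+  if (fp == NULL)
+    return 1;
+  /* Read the header, then skip the remaining histogram bytes
+     (hdr.ncnt counts the header itself as well).  */
+  if (fread (&hdr, sizeof hdr, 1, fp) != 1)
+    return 1;
+  fseek (fp, hdr.ncnt - (long) sizeof hdr, SEEK_CUR);
+  /* The rest of the file is raw call-graph arcs.  */
+  while (fread (&arc, sizeof arc, 1, fp) == 1)
+    printf ("0x%lx -> 0x%lx : %ld\n",
+            arc.raw_frompc, arc.raw_selfpc, arc.raw_count);
+  fclose (fp);
+  return 0;
+}
+#endif
+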
+/*
+ * The Sparc stack frame is only held together by the frame pointers
+ * in the register windows. According to the SVR4 SPARC ABI
+ * Supplement, Low Level System Information/Operating System
+ * Interface/Software Trap Types, a type 3 trap will flush all of the
+ * register windows to the stack, which will make it possible to walk
+ * the frames and find the return addresses.
+ * However, it seems awfully expensive to incur a trap (system
+ * call) for every function call. It turns out that "call" simply puts
+ * the return address in %o7 expecting the "save" in the procedure to
+ * shift it into %i7; this means that before the "save" occurs, %o7
+ * contains the address of the call to mcount, and %i7 still contains
+ * the caller above that. The asm mcount here simply saves those
+ * registers in argument registers and branches to internal_mcount,
+ * simulating a call with arguments.
+ * Kludges:
+ * 1) the branch to internal_mcount is hard coded; it should be
+ * possible to tell asm to use the assembler-name of a symbol.
+ * 2) in theory, the function calling mcount could have saved %i7
+ * somewhere and reused the register; in practice, I *think* this will
+ * break longjmp (and maybe the debugger) but I'm not certain. (I take
+ * some comfort in the knowledge that it will break the native mcount
+ * as well.)
+ * 3) if builtin_return_address worked, this could be portable.
+ * However, it would really have to be optimized for arguments of 0
+ * and 1 and do something like what we have here in order to avoid the
+ * trap per function call performance hit.
+ * 4) the atexit and monsetup calls prevent this from simply
+ * being a leaf routine that doesn't do a "save" (and would thus have
+ * access to %o7 and %i7 directly) but the call to write() at the end
+ * would have also prevented this.
+ *
+ * -- [eichin:19920702.1107EST]
+ */
+
+/* i7 == last ret, -> frompcindex */
+/* o7 == current ret, -> selfpc */
+/* Solaris 2 libraries use _mcount. */
+asm(".global _mcount; _mcount: mov %i7,%o1; mov %o7,%o0;b,a internal_mcount");
+/* This is for compatibility with old versions of gcc which used mcount. */
+asm(".global mcount; mcount: mov %i7,%o1; mov %o7,%o0;b,a internal_mcount");
+
+static void internal_mcount(selfpc, frompcindex)
+ register char *selfpc;
+ register unsigned short *frompcindex;
+{
+ register struct tostruct *top;
+ register struct tostruct *prevtop;
+ register long toindex;
+ static char already_setup;
+
+ /*
+ * find the return address for mcount,
+ * and the return address for mcount's caller.
+ */
+
+ if(!already_setup) {
+ extern etext();
+ already_setup = 1;
+ monstartup(0, etext);
+#ifdef USE_ONEXIT
+ on_exit(_mcleanup, 0);
+#else
+ atexit(_mcleanup);
+#endif
+ }
+ /*
+ * check that we are profiling
+ * and that we aren't recursively invoked.
+ */
+ if (profiling) {
+ goto out;
+ }
+ profiling++;
+ /*
+ * check that frompcindex is a reasonable pc value.
+ * for example: signal catchers get called from the stack,
+ * not from text space. too bad.
+ */
+ frompcindex = (unsigned short *)((long)frompcindex - (long)s_lowpc);
+ if ((unsigned long)frompcindex > s_textsize) {
+ goto done;
+ }
+ frompcindex =
+ &froms[((long)frompcindex) / (HASHFRACTION * sizeof(*froms))];
+ toindex = *frompcindex;
+ if (toindex == 0) {
+ /*
+ * first time traversing this arc
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ *frompcindex = toindex;
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = 0;
+ goto done;
+ }
+ top = &tos[toindex];
+ if (top->selfpc == selfpc) {
+ /*
+ * arc at front of chain; usual case.
+ */
+ top->count++;
+ goto done;
+ }
+ /*
+ * have to go looking down chain for it.
+ * top points to what we are looking at,
+ * prevtop points to previous top.
+ * we know it is not at the head of the chain.
+ */
+ for (; /* goto done */; ) {
+ if (top->link == 0) {
+ /*
+ * top is end of the chain and none of the chain
+ * had top->selfpc == selfpc.
+ * so we allocate a new tostruct
+ * and link it to the head of the chain.
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+ /*
+ * otherwise, check the next arc on the chain.
+ */
+ prevtop = top;
+ top = &tos[top->link];
+ if (top->selfpc == selfpc) {
+ /*
+ * there it is.
+ * increment its count
+ * move it to the head of the chain.
+ */
+ top->count++;
+ toindex = prevtop->link;
+ prevtop->link = top->link;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+
+ }
+done:
+ profiling--;
+ /* and fall through */
+out:
+ return; /* normal return restores saved registers */
+
+overflow:
+ profiling++; /* halt further profiling */
+# define TOLIMIT "mcount: tos overflow\n"
+ write(2, TOLIMIT, sizeof(TOLIMIT));
+ goto out;
+}
+
+/*
+ * Control profiling
+ * profiling is what mcount checks to see if
+ * all the data structures are ready.
+ */
+static void moncontrol(mode)
+ int mode;
+{
+ if (mode) {
+ /* start */
+ profil((unsigned short *)(sbuf + sizeof(struct phdr)),
+ ssiz - sizeof(struct phdr),
+ (int)s_lowpc, s_scale);
+ profiling = 0;
+ } else {
+ /* stop */
+ profil((unsigned short *)0, 0, 0, 0);
+ profiling = 3;
+ }
+}
diff --git a/contrib/gcc/config/sparc/lb1spc.asm b/contrib/gcc/config/sparc/lb1spc.asm
new file mode 100644
index 0000000..831f33a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lb1spc.asm
@@ -0,0 +1,784 @@
+/* This is an assembly language implementation of libgcc1.c for the sparc
+ processor.
+
+ These routines are derived from the Sparc Architecture Manual, version 8,
+ slightly edited to match the desired calling convention, and also to
+ optimize them for our purposes. */
+
+#ifdef L_mulsi3
+.text
+ .align 4
+ .global .umul
+ .proc 4
+.umul:
+ or %o0, %o1, %o4 ! logical or of multiplier and multiplicand
+ mov %o0, %y ! multiplier to Y register
+ andncc %o4, 0xfff, %o5 ! mask out lower 12 bits
+ be mul_shortway ! can do it the short way
+ andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc
+ !
+ ! long multiply
+ !
+ mulscc %o4, %o1, %o4 ! first iteration of 33
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 32nd iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ ! the upper 32 bits of product are wrong, but we do not care
+ retl
+ rd %y, %o0
+ !
+ ! short multiply
+ !
+mul_shortway:
+ mulscc %o4, %o1, %o4 ! first iteration of 13
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 12th iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ rd %y, %o5
+ sll %o4, 12, %o4 ! left shift partial product by 12 bits
+ srl %o5, 20, %o5 ! right shift partial product by 20 bits
+ retl
+ or %o5, %o4, %o0 ! merge for true product
+#endif
+
+#ifdef L_divsi3
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ * .div name of function to generate
+ * div div=div => %o0 / %o1; div=rem => %o0 % %o1
+ * true true=true => signed; true=false => unsigned
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
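+
+/* For illustration only (not from the original sources): the quotient
+   development described above can be modelled in C with an ordinary
+   restoring shift-and-subtract division.  This sketch mirrors only the
+   overall structure (scale the comparand V up, then develop quotient
+   bits by repeated subtraction); it does not reproduce the non-restoring
+   updates or the final fixup used by the code below.  */
+#if 0
+#include <assert.h>
+#include <stdio.h>
+
+static unsigned long
+example_udiv (unsigned long dividend, unsigned long divisor,
+              unsigned long *rem)
+{
+  unsigned long r = dividend;   /* R: the remainder so far            */
+  unsigned long q = 0;          /* Q: the quotient under development  */
+  unsigned long v = divisor;    /* V: the current comparand           */
+  int steps = 1;                /* single-bit divide steps to perform */
+
+  assert (divisor != 0);
+
+  /* Scale V up while it still fits under R; each doubling means one
+     more quotient bit to develop.  (The assembly scales by 2^N at a
+     time and counts outer iterations of N bits instead.)  */
+  while (v <= (r >> 1))
+    {
+      v <<= 1;
+      steps++;
+    }
+
+  /* Classic restoring division: one quotient bit per step.  */
+  while (steps-- > 0)
+    {
+      q <<= 1;
+      if (r >= v)
+        {
+          r -= v;
+          q |= 1;
+        }
+      v >>= 1;
+    }
+
+  *rem = r;
+  return q;
+}
+
+int
+main (void)
+{
+  unsigned long rem;
+  unsigned long quot = example_udiv (1000000UL, 37UL, &rem);
+
+  printf ("%lu %lu (expect %lu %lu)\n",
+          quot, rem, 1000000UL / 37UL, 1000000UL % 37UL);
+  return 0;
+}
+#endif
+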
+ .global .udiv
+ .align 4
+ .proc 4
+ .text
+.udiv:
+ b ready_to_divide
+ mov 0, %g3 ! result is always positive
+
+ .global .div
+ .align 4
+ .proc 4
+ .text
+.div:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge ready_to_divide ! no, go do the divide
+ xor %o1, %o0, %g3 ! compute sign in any case
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge ready_to_divide ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+
+
+ready_to_divide:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 ! ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+ ! Compute ITER in an unorthodox manner: we know we need to shift V into
+ ! the top decade, so do not even bother to compare it to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+ ! We get here if the divisor (now in %o5) overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-Sparc manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ sub %o2, 1, %o2
+
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o2, %o2
+1:
+ retl
+ mov %o2, %o0
+#endif
+
+#ifdef L_modsi3
+/* This implementation was taken from glibc:
+ *
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
+.text
+ .align 4
+ .global .urem
+ .proc 4
+.urem:
+ b divide
+ mov 0, %g3 ! result always positive
+
+ .align 4
+ .global .rem
+ .proc 4
+.rem:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge 2f ! no, go do the divide
+ mov %o0, %g3 ! sign of remainder matches %o0
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge 2f ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+2:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+divide:
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 !ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+ ! Compute ITER in an unorthodox manner: we know we need to shift V into
+ ! the top decade, so do not even bother to compare it to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+ ! We get here if the divisor (now in %o5) overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-Sparc manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ add %o3, %o1, %o3
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o3, %o3
+1:
+ retl
+ mov %o3, %o0
+
+#endif
+
diff --git a/contrib/gcc/config/sparc/lb1spl.asm b/contrib/gcc/config/sparc/lb1spl.asm
new file mode 100644
index 0000000..4c8bc30
--- /dev/null
+++ b/contrib/gcc/config/sparc/lb1spl.asm
@@ -0,0 +1,246 @@
+/* This is an assembly language implementation of libgcc1.c for the sparclite
+ processor.
+
+ These routines are all from the Sparclite User's Guide, slightly edited
+ to match the desired calling convention, and also to optimize them. */
+
+#ifdef L_udivsi3
+.text
+ .align 4
+ .global .udiv
+ .proc 04
+.udiv:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ retl
+ divscc %g1,%o1,%o0
+#endif
+
+#ifdef L_umodsi3
+.text
+ .align 4
+ .global .urem
+ .proc 04
+.urem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ bl 1f
+ rd %y,%o0
+ retl
+ nop
+1: retl
+ add %o0,%o1,%o0
+#endif
+
+#ifdef L_divsi3
+.text
+ .align 4
+ .global .div
+ .proc 04
+! ??? This routine could be made faster if it was optimized, and if it was
+! rewritten to only calculate the quotient.
+.div:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4
+1: tst %o0
+ bl,a 2f
+ mov -1,%y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f
+ mov %y,%o3
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f
+ sub %g0,%g1,%g1
+7: retl
+ mov %g1,%o0 ! Quotient is in %g1.
+#endif
+
+#ifdef L_modsi3
+.text
+ .align 4
+ .global .rem
+ .proc 04
+! ??? This routine could be made faster if it was optimized, and if it was
+! rewritten to only calculate the remainder.
+.rem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4
+1: tst %o0
+ bl,a 2f
+ mov -1,%y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f
+ mov %y,%o3
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f
+ sub %g0,%g1,%g1
+7: retl
+ mov %o3,%o0 ! Remainder is in %o3.
+#endif
diff --git a/contrib/gcc/config/sparc/linux-aout.h b/contrib/gcc/config/sparc/linux-aout.h
new file mode 100644
index 0000000..76d7653
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux-aout.h
@@ -0,0 +1,130 @@
+/* Definitions for SPARC running Linux-based GNU systems with a.out.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <aoutos.h>
+#include <sparc/sparc.h>
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
+ For now, we play safe. It may change later. */
+
+#if 0
+#undef MULTIBYTE_CHARS
+#define MULTIBYTE_CHARS 1
+#endif
+
+/* We need SVR4-style #pragma handling too. */
+#define HANDLE_SYSV_PRAGMA
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc GNU/Linux with a.out)");
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler,
+ and we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#undef LIB_SPEC
+
+#if 1
+/* We no longer link with libc_p.a or libg.a by default. If you
+ want to profile or debug the GNU/Linux C library, please add
+ -lc_p or -ggdb to LDFLAGS at link time, respectively. */
+#define LIB_SPEC \
+"%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+#else
+#define LIB_SPEC \
+"%{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg -static}}}"
+#endif
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m sparclinux"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s %{fpic:-K PIC} %{fPIC:-K PIC}"
+
+#if 0
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. GNU/Linux does not support
+ long double yet. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/linux.h b/contrib/gcc/config/sparc/linux.h
new file mode 100644
index 0000000..7bbbfa4
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux.h
@@ -0,0 +1,259 @@
+/* Definitions for SPARC running Linux-based GNU systems with ELF.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define LINUX_DEFAULT_ELF
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
+ For now, we play safe. It may change later. */
+
+#if 0
+#undef MULTIBYTE_CHARS
+#define MULTIBYTE_CHARS 1
+#endif
+
+#ifndef USE_GNULIBC_1
+#undef DEFAULT_VTABLE_THUNKS
+#define DEFAULT_VTABLE_THUNKS 1
+#endif
+
+/* Use stabs instead of DWARF debug format. */
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include <sparc/sysv4.h>
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+/* This is for -profile to use -lc_p instead of -lc. */
+#undef CC1_SPEC
+#define CC1_SPEC "%{profile:-p} \
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc GNU/Linux with ELF)");
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -Dsparc -D__sparc__ -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#ifdef USE_GNULIBC_1
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
+#else
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+#endif
+
+#undef LIB_SPEC
+/* We no longer link with libc_p.a or libg.a by default. If you
+ want to profile or debug the GNU/Linux C library, please add
+ -lc_p or -ggdb to LDFLAGS at link time, respectively. */
+#if 1
+#ifdef USE_GNULIBC_1
+#define LIB_SPEC \
+ "%{!shared: %{p:-lgmon} %{pg:-lgmon} %{profile:-lgmon -lc_p} \
+ %{!profile:%{!ggdb:-lc} %{ggdb:-lg}}}"
+#else
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+#endif
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
+
+/* Provide a LINK_SPEC appropriate for GNU/Linux. Here we provide support
+ for the special GCC options -static and -shared, which allow us to
+ link things in one of these three modes by applying the appropriate
+ combinations of options at link-time. We would like to support as
+ many of the other GNU linker options as possible here, but I don't
+ have the time to search for those flags, and I am not sure how to add
+ support for -soname shared_object_name. H.J.
+
+ I took out %{v:%{!V:-V}}. It is too much :-(. They can use
+ -Wl,-V.
+
+ When the -shared link option is used, a final link is not
+ done. */
+
+/* If ELF is the default format, we should not use /lib/elf. */
+
+#undef LINK_SPEC
+#ifdef USE_GNULIBC_1
+#ifndef LINUX_DEFAULT_ELF
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/elf/ld-linux.so.1} \
+ %{!rpath:-rpath /lib/elf/}} %{static:-static}}}"
+#else
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.1}} \
+ %{static:-static}}}"
+#endif
+#else
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2}} \
+ %{static:-static}}}"
+#endif
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s %{fpic:-K PIC} %{fPIC:-K PIC}"
+
+/* Same as sparc.h */
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. XXX */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
+
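+/* For illustration only (not from the original sources): with PREFIX
+   "LC" and NUM 3, ASM_GENERATE_INTERNAL_LABEL stores "*.LLC3" (the
+   leading '*' tells assemble_name to emit the rest verbatim), while
+   ASM_OUTPUT_INTERNAL_LABEL prints ".LLC3:" and
+   ASM_OUTPUT_INTERNAL_LABELREF prints ".LLC3".  */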
+
+#if 0
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. GNU/Linux does not support
+ long double yet. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/linux64.h b/contrib/gcc/config/sparc/linux64.h
new file mode 100644
index 0000000..77bc668
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux64.h
@@ -0,0 +1,245 @@
+/* Definitions for 64-bit SPARC running Linux-based GNU systems with ELF.
+ Copyright 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by David S. Miller (davem@caip.rutgers.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? bi-architecture support will require changes to the linker
+ related specs, among perhaps other things (multilibs). */
+/* #define SPARC_BI_ARCH */
+
+#define LINUX_DEFAULT_ELF
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+#include <sparc/sysv4.h>
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-Av9a"
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux. Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ objects constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64 GNU/Linux with ELF)");
+
+/* A 64 bit v9 compiler with stack-bias,
+ in a Medium/Anywhere code model environment. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \
+ + MASK_STACK_BIAS + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU)
+
+/* The default code model. */
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDANY
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D_LONGLONG -Dsparc -D__sparc__ -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{fPIC:-D__PIC__ -D__pic__} \
+%{fpic:-D__PIC__ -D__pic__} \
+%{posix:-D_POSIX_SOURCE} \
+%{pthread:-D_REENTRANT} \
+"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+
+/* Provide a LINK_SPEC appropriate for GNU/Linux. Here we provide support
+ for the special GCC options -static and -shared, which allow us to
+ link things in one of these three modes by applying the appropriate
+ combinations of options at link-time. We would like to support as
+ many of the other GNU linker options as possible here, but I don't
+ have the time to search for those flags, and I am not sure how to add
+ support for -soname shared_object_name. H.J.
+
+ I took out %{v:%{!V:-V}}. It is too much :-(. They can use
+ -Wl,-V.
+
+ When the -shared link option is used, a final link is not
+ done. */
+
+/* If ELF is the default format, we should not use /lib/elf. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf64_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux64.so.2}} \
+ %{static:-static}}} \
+%{mlittle-endian:-EL} \
+"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{V} \
+%{v:%{!V:-V}} \
+%{!Qn:-Qy} \
+%{n} \
+%{T} \
+%{Ym,*} \
+%{Wa,*:%*} \
+-s %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{mlittle-endian:-EL} \
+%(asm_cpu) %(asm_arch) \
+"
+
+/* Same as sparc.h */
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* System V Release 4 uses DWARF debugging info. But DWARF1 doesn't do
+ 64-bit anything, so we use DWARF2. */
+
+#undef DWARF2_DEBUGGING_INFO
+#undef DWARF_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
+
+/* Stabs doesn't use this, and it confuses a simulator. */
+/* ??? Need to see what DWARF needs, if anything. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE)
+
+/* Define the names of various pseudo-ops used by the Sparc/svr4 assembler.
+ ??? If ints are 64 bits then UNALIGNED_INT_ASM_OP (defined elsewhere) is
+ misnamed. These should all refer to explicit sizes (half/word/xword?),
+ anything other than short/int/long/etc. */
+
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+
+/* DWARF bits. */
+
+/* Follow Irix 6 and not the Dwarf2 draft in using 64-bit offsets.
+ Obviously the Dwarf2 folks haven't tried to actually build systems
+ with their spec. On a 64-bit system, only 64-bit relocs become
+ RELATIVE relocations. */
+
+/* #define DWARF_OFFSET_SIZE PTR_SIZE */
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/lite.h b/contrib/gcc/config/sparc/lite.h
new file mode 100644
index 0000000..55c232a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lite.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler, for SPARClite w/o FPU.
+ Copyright (C) 1993, 1996 Free Software Foundation, Inc.
+ Contributed by Jim Wilson (wilson@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Dsparclite -Acpu(sparc) -Amachine(sparc)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparclite)");
+
+/* Enable app-regs and epilogue options. Do not enable the fpu. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+
+/* US Software GOFAST library support. */
+#include "gofast.h"
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS INIT_GOFAST_OPTABS
diff --git a/contrib/gcc/config/sparc/litecoff.h b/contrib/gcc/config/sparc/litecoff.h
new file mode 100644
index 0000000..bd89e1b
--- /dev/null
+++ b/contrib/gcc/config/sparc/litecoff.h
@@ -0,0 +1,113 @@
+/* Definitions of target machine for GNU compiler, for SPARClite w/o FPU, COFF.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Written by Ken Raeburn (raeburn@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/lite.h"
+
+#undef ASM_OUTPUT_IDENT
+
+#undef SELECT_SECTION
+#undef SELECT_RTX_SECTION
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+
+#include "svr3.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Dsparclite -Acpu(sparc) -Amachine(sparc)"
+
+/* Default to stabs in COFF. */
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Support the ctors and dtors sections for g++. */
+
+#undef INIT_SECTION_ASM_OP
+
+/* Support the ctors and dtors sections for g++. */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#define INT_ASM_OP ".long"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
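+
+/* For illustration only (not from the original sources): for a
+   hypothetical constructor symbol __ctor_foo, ASM_OUTPUT_CONSTRUCTOR
+   above emits roughly
+
+       .section	.ctors,"x"
+	.long	 __ctor_foo
+
+   and ASM_OUTPUT_DESTRUCTOR emits the analogous .long entry into the
+   .dtors section.  */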
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
diff --git a/contrib/gcc/config/sparc/lynx-ng.h b/contrib/gcc/config/sparc/lynx-ng.h
new file mode 100644
index 0000000..9e9f82c
--- /dev/null
+++ b/contrib/gcc/config/sparc/lynx-ng.h
@@ -0,0 +1,41 @@
+/* Definitions for SPARC running LynxOS, using Lynx's old as and ld.
+ Copyright (C) 1993, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+#include <lynx-ng.h>
+
+/* ??? Must redefine to get sparclite and v8 defines. Can this be done
+ differently? */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{mthreads:-D_MULTITHREADED} \
+ %{mposix:-D_POSIX_SOURCE} \
+ %{msystem-v:-I/usr/include_v} \
+ %(cpp_cpu)"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -DLynx -DIBITS32 -Asystem(unix) -Asystem(lynx) -Acpu(sparc) -Amachine(sparc)"
+
+/* Provide required defaults for linker switches. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-e __main -T 0 %{msystem-v:-V} %{mcoff:-k}"
diff --git a/contrib/gcc/config/sparc/lynx.h b/contrib/gcc/config/sparc/lynx.h
new file mode 100644
index 0000000..99b319a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lynx.h
@@ -0,0 +1,53 @@
+/* Definitions for SPARC running LynxOS.
+ Copyright (C) 1993, 1995, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+
+#undef ASM_OUTPUT_IDENT
+#undef SELECT_SECTION
+#undef SELECT_RTX_SECTION
+
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+
+#include <lynx.h>
+
+/* ??? Must redefine to get sparclite and v8 defines. Can this be done
+ differently? */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{mthreads:-D_MULTITHREADED} \
+ %{mposix:-D_POSIX_SOURCE} \
+ %{msystem-v:-I/usr/include_v} \
+ %(cpp_cpu)"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -DSPARC -DLynx -DLYNX -DIBITS32 -Asystem(unix) -Asystem(lynx) -Acpu(sparc) -Amachine(sparc)"
+
+#undef LINK_SPEC
+
+/* Sparc version of libc.a has references to libm.a (printf calls pow for
+ instance), so we must always link both. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{mthreads:-L/lib/thread/} \
+ %{msystem-v:-lc_v -lm_v -lc_v} \
+ %{!msystem-v:%{mposix:-lc_p} -lc -lm -lc}"
diff --git a/contrib/gcc/config/sparc/netbsd.h b/contrib/gcc/config/sparc/netbsd.h
new file mode 100644
index 0000000..a512f41
--- /dev/null
+++ b/contrib/gcc/config/sparc/netbsd.h
@@ -0,0 +1,46 @@
+#include <sparc/sparc.h>
+
+/* Get generic NetBSD definitions. */
+
+#include <netbsd.h>
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -D__NetBSD__ -Asystem(unix) -Asystem(NetBSD) -Acpu(sparc) -Amachine(sparc)"
+
+/* Make gcc agree with <machine/ansi.h> */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#define DWARF2_UNWIND_INFO 0
+
diff --git a/contrib/gcc/config/sparc/openbsd.h b/contrib/gcc/config/sparc/openbsd.h
new file mode 100644
index 0000000..19ece97
--- /dev/null
+++ b/contrib/gcc/config/sparc/openbsd.h
@@ -0,0 +1,68 @@
+/* Configuration file for sparc OpenBSD target.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+
+/* Get generic OpenBSD definitions. */
+#define OBSD_OLD_GAS
+#include <openbsd.h>
+
+/* Run-time target specifications. */
+#define CPP_PREDEFINES "-D__unix__ -D__sparc__ -D__OpenBSD__ -Asystem(unix) -Asystem(OpenBSD) -Acpu(sparc) -Amachine(sparc)"
+
+/* Layout of source language data types */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Specific options for DBX Output. */
+
+/* This is BSD, so it wants DBX format. */
+#define DBX_DEBUGGING_INFO
+
+/* This is the char to use for continuation */
+#define DBX_CONTIN_CHAR '?'
+
+/* Stack & calling: aggregate returns. */
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. egcs doesn't try too hard to check internal
+ configuration files... */
+#define DWARF2_UNWIND_INFO 0
+
+/* The default sparc.h already defines ASM_OUTPUT_MI_THUNK.  */
+
diff --git a/contrib/gcc/config/sparc/pbd.h b/contrib/gcc/config/sparc/pbd.h
new file mode 100644
index 0000000..459bffd
--- /dev/null
+++ b/contrib/gcc/config/sparc/pbd.h
@@ -0,0 +1,184 @@
+/* Definitions of target machine for GNU compiler, Citicorp/TTI Unicom PBD
+   version (using GAS and COFF (encapsulated is unacceptable))
+ Copyright (C) 1990, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -DUnicomPBD -Dunix -D__GCC_NEW_VARARGS__ -Asystem(unix) -Acpu(sparc) -Amachine(sparc)"
+
+/* We want DBX format for use with gdb under COFF. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Generate calls to memcpy, memcmp and memset. */
+
+#define TARGET_MEM_FUNCTIONS
+
+/* We use /usr/lib/libp/lib* when profiling.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{p:-L/usr/lib/libp} %{pg:-L/usr/lib/libp} -lc"
+
+
+/* Use crt1.o as a startup file and crtn.o as a closing file. */
+/*
+ * The loader directive file gcc.ifile defines how to merge the constructor
+ * sections into the data section. Also, since gas only puts out those
+ * sections in response to N_SETT stabs, and does not (yet) have a
+ * ".sections" directive, gcc.ifile also defines the list symbols
+ * __DTOR_LIST__ and __CTOR_LIST__.
+ *
+ * Finally, we must explicitly specify the file from libgcc.a that defines
+ * exit(), otherwise if the user specifies (for example) "-lc_s" on the
+ * command line, the wrong exit() will be used and global destructors will
+ * not get called.
+ */
+
+#define STARTFILE_SPEC \
+"%{!r: gcc.ifile%s} %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}} \
+%{!r:_exit.o%s}"
+
+#define ENDFILE_SPEC "crtn.o%s"
+
+/* cpp has to support a #sccs directive for the /usr/include files */
+
+#define SCCS_DIRECTIVE
+
+/* LINK_SPEC is needed only for SunOS 4. */
+
+#undef LINK_SPEC
+
+/* Although the gas we use can create .ctor and .dtor sections from N_SETT
+ stabs, it does not support section directives, so we need to have the loader
+ define the lists.
+ */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+/* Similar to the default, but also handles the table defined by ld via
+   gcc.ifile, where the leading count word is always 0; in that case we
+   check that __DTOR_LIST__[1] != 0 and count the entries ourselves.
+   The old check is left in so that the same macro can be used if and when
+   a future version of gas does support section directives. */
+
+#define DO_GLOBAL_DTORS_BODY {int nptrs = *(int *)__DTOR_LIST__; int i; \
+ if (nptrs == -1 || (__DTOR_LIST__[0] == 0 && __DTOR_LIST__[1] != 0)) \
+ for (nptrs = 0; __DTOR_LIST__[nptrs + 1] != 0; nptrs++); \
+ for (i = nptrs; i >= 1; i--) \
+ __DTOR_LIST__[i] (); }
+
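+/* A standalone sketch of the walk DO_GLOBAL_DTORS_BODY above performs,
+   using a hypothetical TABLE in place of the real __DTOR_LIST__ that ld
+   lays out via gcc.ifile.  */
+
+typedef void (*func_ptr_sketch) (void);
+
+static void
+run_dtors_sketch (func_ptr_sketch *table)
+{
+  /* The first word is either an entry count, or 0 when ld built the table
+     (the gcc.ifile case); for -1 or the gcc.ifile case, count the entries
+     up to the terminating null, then call them in reverse order.  */
+  int nptrs = *(int *) table;
+  int i;
+
+  if (nptrs == -1 || (table[0] == 0 && table[1] != 0))
+    for (nptrs = 0; table[nptrs + 1] != 0; nptrs++)
+      ;
+
+  for (i = nptrs; i >= 1; i--)
+    table[i] ();
+}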
+/*
+ * Here is an example gcc.ifile. I've tested it on PBD sparc
+ * systems. The NEXT(0x200000) works on just about all 386 and m68k systems,
+ * but can be reduced to any power of 2 that is >= NBPS (0x40000 on a pbd).
+
+ SECTIONS {
+ .text BIND(0x41000200) BLOCK (0x200) :
+ { *(.init) *(.text) vfork = fork; *(.fini) }
+
+ GROUP BIND( NEXT(0x200000) + ADDR(.text) + SIZEOF(.text)):
+ { .data : { __CTOR_LIST__ = . ; . += 4; *(.ctor) . += 4 ;
+ __DTOR_LIST__ = . ; . += 4; *(.dtor) . += 4 ; }
+ .bss : { }
+ }
+ }
+ */
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* Fixes: internal labels are prefixed with a period. */
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.%s%d", PREFIX, NUM)
+
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.word .L%d-.L%d\n", VALUE, REL)
+
+/* This is how to output an element of a case-vector that is absolute.
+ (The 68000 does not use such vectors,
+ but we must define this macro anyway.) */
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.word .L%d\n", VALUE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(.LP%d),%%o0\n\tcall mcount\n\tor %%lo(.LP%d),%%o0,%%o0\n", \
+ (LABELNO), (LABELNO))
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#undef FUNCTION_BLOCK_PROFILER
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(.LPBX0),%%o0\n\tld [%%lo(.LPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne .LPY%d\n\tnop\n\tcall ___bb_init_func\n\tnop\n.LPY%d:\n", \
+ (LABELNO), (LABELNO))
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#undef BLOCK_PROFILER
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+{ \
+ int blockn = (BLOCKNO); \
+ fprintf (FILE, "\tsethi %%hi(.LPBX2+%d),%%g1\n\tld [%%lo(.LPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(.LPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+ CC_STATUS_INIT; /* We have clobbered %g1. Also %g2. */ \
+}
+/* This is needed for SunOS 4.0, and should not hurt for 3.2
+ versions either. */
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+ { static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,.LM%d\n.LM%d:\n", \
+ line, sym_lineno, sym_lineno); \
+ sym_lineno += 1; }
+
+#define ASM_INT_OP ".long "
diff --git a/contrib/gcc/config/sparc/rtems.h b/contrib/gcc/config/sparc/rtems.h
new file mode 100644
index 0000000..1ab0a42
--- /dev/null
+++ b/contrib/gcc/config/sparc/rtems.h
@@ -0,0 +1,35 @@
+/* Definitions for rtems targeting a SPARC using a.out.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* Specify predefined symbols in preprocessor. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -Drtems -D__rtems__ \
+ -Asystem(rtems) -Acpu(sparc) -Amachine(sparc)"
+
+/* Generate calls to memcpy, memcmp and memset. */
+#ifndef TARGET_MEM_FUNCTIONS
+#define TARGET_MEM_FUNCTIONS
+#endif
+
+/* end of sparc/rtems.h */
diff --git a/contrib/gcc/config/sparc/sol2-c1.asm b/contrib/gcc/config/sparc/sol2-c1.asm
new file mode 100644
index 0000000..618d698
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-c1.asm
@@ -0,0 +1,86 @@
+! crt1.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file takes control of the process from the kernel, as specified
+! in section 3 of the SVr4 ABI.
+! This file is the first thing linked into any executable.
+
+ .section ".text"
+ .proc 022
+ .global _start
+
+_start:
+ mov 0, %fp ! Mark bottom frame pointer
+ ld [%sp + 64], %l0 ! argc
+ add %sp, 68, %l1 ! argv
+
+ ! Leave some room for a call. Sun leaves 32 octets (to sit on
+ ! a cache line?) so we do too.
+ sub %sp, 32, %sp
+
+ ! %g1 may contain a function to be registered w/atexit
+ orcc %g0, %g1, %g0
+ be .nope
+ mov %g1, %o0
+ call atexit
+ nop
+.nope:
+ ! Now make sure constructors and destructors are handled.
+ set _fini, %o0
+ call atexit, 1
+ nop
+ call _init, 0
+ nop
+
+ ! We ignore the auxiliary vector; there's no defined way to
+ ! access those data anyway. Instead, go straight to main:
+ mov %l0, %o0 ! argc
+ mov %l1, %o1 ! argv
+ ! Skip argc words past argv, to env:
+ sll %l0, 2, %o2
+ add %o2, 4, %o2
+ add %l1, %o2, %o2 ! env
+ set _environ, %o3
+ st %o2, [%o3] ! *_environ
+ call main, 4
+ nop
+ call exit, 0
+ nop
+ call _exit, 0
+ nop
+ ! We should never get here.
+
+ .type _start,#function
+ .size _start,.-_start
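The pointer arithmetic above (the sll/add sequence on %l0 and %l1) simply locates the environment vector, which on the SVr4 initial stack sits one word past argv's terminating null pointer. A C sketch of the same computation, with a hypothetical function name:

    /* env starts immediately after argv[argc], the null terminator.  */
    static char **
    compute_env_sketch (int argc, char **argv)
    {
      return argv + argc + 1;
    }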
diff --git a/contrib/gcc/config/sparc/sol2-ci.asm b/contrib/gcc/config/sparc/sol2-ci.asm
new file mode 100644
index 0000000..dd09a34
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-ci.asm
@@ -0,0 +1,60 @@
+! crti.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just makes a stack frame for the contents of the .fini and
+! .init sections. Users may put any desired instructions in those
+! sections.
+
+! This file is linked in before the Values-Xx.o files and also before
+! crtbegin, with which perhaps it should be merged.
+
+ .file "crti.s"
+
+ .section ".init"
+ .proc 022
+ .global _init
+ .type _init,#function
+ .align 4
+_init:
+ save %sp, -96, %sp
+
+
+ .section ".fini"
+ .proc 022
+ .global _fini
+ .type _fini,#function
+ .align 4
+_fini:
+ save %sp, -96, %sp
diff --git a/contrib/gcc/config/sparc/sol2-cn.asm b/contrib/gcc/config/sparc/sol2-cn.asm
new file mode 100644
index 0000000..3c5d508
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-cn.asm
@@ -0,0 +1,54 @@
+! crtn.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just makes sure that the .fini and .init sections do in
+! fact return. Users may put any desired instructions in those sections.
+! This file is the last thing linked into any executable.
+
+ .file "crtn.s"
+
+ .section ".init"
+ .align 4
+
+ ret
+ restore
+
+ .section ".fini"
+ .align 4
+
+ ret
+ restore
+
+! Th-th-th-that's all folks!
diff --git a/contrib/gcc/config/sparc/sol2-g1.asm b/contrib/gcc/config/sparc/sol2-g1.asm
new file mode 100644
index 0000000..b9d8788
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-g1.asm
@@ -0,0 +1,88 @@
+! gcrt1.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file takes control of the process from the kernel, as specified
+! in section 3 of the SVr4 ABI.
+! This file is the first thing linked into any executable.
+
+ .section ".text"
+ .proc 022
+ .global _start
+
+_start:
+ mov 0, %fp ! Mark bottom frame pointer
+ ld [%sp + 64], %l0 ! argc
+ add %sp, 68, %l1 ! argv
+
+ ! Leave some room for a call. Sun leaves 32 octets (to sit on
+ ! a cache line?) so we do too.
+ sub %sp, 32, %sp
+
+ ! %g1 may contain a function to be registered w/atexit
+ orcc %g0, %g1, %g0
+ be .nope
+ mov %g1, %o0
+ call atexit
+ nop
+.nope:
+ ! Now make sure constructors and destructors are handled.
+ set _fini, %o0
+ call atexit, 1
+ nop
+ call _init, 0
+ nop
+
+ ! We ignore the auxiliary vector; there's no defined way to
+ ! access those data anyway. Instead, go straight to main:
+ mov %l0, %o0 ! argc
+ mov %l1, %o1 ! argv
+ set ___Argv, %o3
+ st %o1, [%o3] ! *___Argv
+ ! Skip argc words past argv, to env:
+ sll %l0, 2, %o2
+ add %o2, 4, %o2
+ add %l1, %o2, %o2 ! env
+ set _environ, %o3
+ st %o2, [%o3] ! *_environ
+ call main, 4
+ nop
+ call exit, 0
+ nop
+ call _exit, 0
+ nop
+ ! We should never get here.
+
+ .type _start,#function
+ .size _start,.-_start
diff --git a/contrib/gcc/config/sparc/sol2-sld.h b/contrib/gcc/config/sparc/sol2-sld.h
new file mode 100644
index 0000000..a824987
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-sld.h
@@ -0,0 +1,11 @@
+/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
+ using the system linker. */
+
+#include "sparc/sol2.h"
+
+/* At least up through Solaris 2.6,
+ the system linker does not work with DWARF or DWARF2,
+ since it does not have working support for relocations
+ to unaligned data. */
+
+#define LINKER_DOES_NOT_WORK_WITH_DWARF2
diff --git a/contrib/gcc/config/sparc/sol2.h b/contrib/gcc/config/sparc/sol2.h
new file mode 100644
index 0000000..a0fa4a8
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2.h
@@ -0,0 +1,232 @@
+/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
+ Copyright 1992, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@netcom.com).
+ Additional changes by David V. Henkel-Wallace (gumby@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Supposedly the same as vanilla sparc svr4, except for the stuff below: */
+#include "sparc/sysv4.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dsparc -Dsun -Dunix -D__svr4__ -D__SVR4 \
+-Asystem(unix) -Asystem(svr4)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{pthreads:-D_REENTRANT -D_PTHREADS} \
+%{!pthreads:%{threads:-D_REENTRANT -D_SOLARIS_THREADS}} \
+%{compat-bsd:-iwithprefixbefore ucbinclude -I/usr/ucbinclude} \
+"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+%{fpic:-K PIC} %{fPIC:-K PIC} \
+%(asm_cpu) \
+"
+
+/* This is here rather than in sparc.h because it's not known what
+ other assemblers will accept. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-xarch=v8plus"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-xarch=v8plusa"
+#endif
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC "\
+%{mcpu=v8plus:-xarch=v8plus} \
+%{mcpu=ultrasparc:-xarch=v8plusa} \
+%{!mcpu*:%(asm_cpu_default)} \
+"
+
+/* However it appears that Solaris 2.0 uses the same reg numbering as
+ the old BSD-style system did. */
+
+#undef DBX_REGISTER_NUMBER
+/* Same as sparc.h */
+#define DBX_REGISTER_NUMBER(REGNO) \
+ (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* The Solaris 2 assembler uses .skip, not .zero, so put this back. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+/* Use .uahalf/.uaword so packed structure members don't generate
+ assembler errors when using the native assembler. */
+#undef ASM_SHORT
+#define ASM_SHORT ".uahalf"
+#undef ASM_LONG
+#define ASM_LONG ".uaword"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*.L%s%ld", (PREFIX), (long)(NUM))
+
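+/* Sketch of what the label macros above produce for a hypothetical label
+   class "LC" and number 5: ASM_OUTPUT_INTERNAL_LABEL prints ".LLC5:" into
+   the assembly file, while ASM_GENERATE_INTERNAL_LABEL builds the string
+   "*.LLC5" (the leading '*' asks the output routines to emit the name
+   verbatim, without prepending a user label prefix).  */
+
+static void
+label_sketch (FILE *asm_out)
+{
+  char buf[32];
+
+  fprintf (asm_out, ".L%s%d:\n", "LC", 5);    /* ASM_OUTPUT_INTERNAL_LABEL   */
+  sprintf (buf, "*.L%s%ld", "LC", (long) 5);  /* ASM_GENERATE_INTERNAL_LABEL */
+  fprintf (asm_out, "%s\n", buf + 1);         /* assemble_name skips the '*' */
+}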
+
+/* We don't use the standard svr4 STARTFILE_SPEC because it's wrong for us.
+   We don't use the standard LIB_SPEC only because we don't yet support C++.  */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{p:mcrt1.o%s} \
+ %{!p: \
+ %{pg:gcrt1.o%s gmon.o%s} \
+ %{!pg:crt1.o%s}}}} \
+ crti.o%s \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}} \
+ crtbegin.o%s"
+
+/* ??? Note: in order for -compat-bsd to work fully,
+ we must somehow arrange to fixincludes /usr/ucbinclude
+ and put the result in $(libsubdir)/ucbinclude. */
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{compat-bsd:-lucb -lsocket -lnsl -lelf -laio} \
+ %{!shared:\
+ %{!symbolic:\
+ %{pthreads:-lpthread} \
+ %{!pthreads:%{threads:-lthread}} \
+ -lc}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* This should be the same as in svr4.h, except with -R added. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{h*} %{v:-V} \
+ %{b} %{Wl,*:%*} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy %{!mimpure-text:-z text}} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
+ -R /usr/ucblib} \
+ %{!compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}} \
+ %{Qy:} %{!Qn:-Qy}"
+
+/* This defines which switch letters take arguments.
+ It is as in svr4.h but with -R added. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'x' \
+ || (CHAR) == 'z')
+
+/* ??? This does not work in SunOS 4.x, so it is not enabled in sparc.h.
+ Instead, it is enabled here, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* But indicate that it isn't supported by the hardware. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+#define STDC_0_IN_SYSTEM_HEADERS
+
+#define MULDI3_LIBCALL "__mul64"
+#define DIVDI3_LIBCALL "__div64"
+#define UDIVDI3_LIBCALL "__udiv64"
+#define MODDI3_LIBCALL "__rem64"
+#define UMODDI3_LIBCALL "__urem64"
+
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS \
+ fixsfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ftoll"); \
+ fixunssfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ftoull"); \
+ fixdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__dtoll"); \
+ fixunsdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__dtoull")
+
+/* No weird SPARC variants on Solaris */
+#undef TARGET_LIVE_G0
+#define TARGET_LIVE_G0 0
+#undef TARGET_BROKEN_SAVERESTORE
+#define TARGET_BROKEN_SAVERESTORE 0
+
+/* Solaris allows 64 bit out and global registers in 32 bit mode.
+ sparc_override_options will disable V8+ if not generating V9 code. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_V8PLUS)
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
+
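For context, a sketch of how a SAVE/RESTORE pair like the one above is meant to bracket code that may clobber the integer condition codes; the caller below is hypothetical, but libgcc's basic-block profiling hooks are the kind of code these macros exist for.

    /* SAVE declares ms_flags/ms_saveret and reads the condition codes via
       trap 0x20; RESTORE writes them back via trap 0x21.  */
    static void
    cc_preserving_hook_sketch (void)
    {
      MACHINE_STATE_SAVE ("sketch")
      /* ... work that may clobber the integer condition codes ... */
      MACHINE_STATE_RESTORE ("sketch")
    }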
diff --git a/contrib/gcc/config/sparc/sp64-aout.h b/contrib/gcc/config/sparc/sp64-aout.h
new file mode 100644
index 0000000..e3056df
--- /dev/null
+++ b/contrib/gcc/config/sparc/sp64-aout.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler, for SPARC64, a.out.
+ Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans, dje@cygnus.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+#include "aoutos.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64-aout)")
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
+ + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
+
+/* The only code model supported is Medium/Low. */
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDLOW
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
diff --git a/contrib/gcc/config/sparc/sp64-elf.h b/contrib/gcc/config/sparc/sp64-elf.h
new file mode 100644
index 0000000..2482866
--- /dev/null
+++ b/contrib/gcc/config/sparc/sp64-elf.h
@@ -0,0 +1,157 @@
+/* Definitions of target machine for GNU compiler, for SPARC64, ELF.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans, dje@cygnus.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? We're taking the scheme of including another file and then overriding
+ the values we don't like a bit too far here. The alternative is to more or
+ less duplicate all of svr4.h, sparc/sysv4.h, and sparc/sol2.h here
+ (suitably cleaned up). */
+
+#include "sparc/sol2.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64-elf)")
+
+/* A 64 bit v9 compiler in a Medium/Anywhere code model environment. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+(MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
+ + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
+
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_EMBMEDANY
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__ELF__ -Acpu(sparc) -Amachine(sparc)"
+
+/* __svr4__ is used by the C library (FIXME) */
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "-D__svr4__"
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} -s %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{mlittle-endian:-EL} \
+%(asm_cpu) %(asm_arch) \
+"
+
+/* This is taken from sol2.h. */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{v:-V} \
+%{mlittle-endian:-EL} \
+"
+
+/* We need something a little simpler for the embedded environment.
+ Profiling doesn't really work yet so we just copy the default. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}} \
+crtbegin.o%s \
+"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s"
+
+/* Use the default (for now). */
+#undef LIB_SPEC
+
+/* V9 chips can handle either endianness. */
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{"big-endian", -MASK_LITTLE_ENDIAN}, \
+{"little-endian", MASK_LITTLE_ENDIAN},
+
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+
+#undef WORDS_BIG_ENDIAN
+#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+
+/* ??? This should be 32 bits for v9 but what can we do? */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* The medium/anywhere code model practically requires us to put jump tables
+ in the text section as gcc is unable to distinguish LABEL_REF's of jump
+ tables from other label refs (when we need to). */
+/* ??? Revisit this. */
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* System V Release 4 uses DWARF debugging info.
+ GDB doesn't support 64 bit stabs yet and the desired debug format is DWARF
+ anyway so it is the default. */
+
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Stabs doesn't use this, and it confuses a simulator. */
+/* ??? Need to see what DWARF needs, if anything. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE)
+
+/* Define the names of various pseudo-ops used by the Sparc/svr4 assembler.
+ ??? If ints are 64 bits then UNALIGNED_INT_ASM_OP (defined elsewhere) is
+ misnamed. These should all refer to explicit sizes (half/word/xword?),
+ anything other than short/int/long/etc. */
+
+#define UNALIGNED_LONGLONG_ASM_OP ".uaxword"
+
+/* DWARF stuff. */
+
+#define ASM_OUTPUT_DWARF_ADDR(FILE, LABEL) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_LONGLONG_ASM_OP); \
+ assemble_name ((FILE), (LABEL)); \
+ fprintf ((FILE), "\n"); \
+} while (0)
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE, RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_LONGLONG_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE, ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_LONGLONG_ASM_OP, (ADDR))
+
+/* ??? Not sure if this should be 4 or 8 bytes. 4 works for now. */
+#define ASM_OUTPUT_DWARF_REF(FILE, LABEL) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name ((FILE), (LABEL)); \
+ fprintf ((FILE), "\n"); \
+} while (0)
diff --git a/contrib/gcc/config/sparc/sparc.c b/contrib/gcc/config/sparc/sparc.c
new file mode 100644
index 0000000..e350729
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.c
@@ -0,0 +1,6461 @@
+/* Subroutines for insn-output.c for Sun SPARC.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+ 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+ at Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "recog.h"
+#include "toplev.h"
+
+/* 1 if the caller has placed an "unimp" insn immediately after the call.
+ This is used in v8 code when calling a function that returns a structure.
+ v9 doesn't have this. Be careful to have this test be the same as that
+ used on the call. */
+
+#define SKIP_CALLERS_UNIMP_P \
+(!TARGET_ARCH64 && current_function_returns_struct \
+ && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+ && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+ == INTEGER_CST))
+
+/* Global variables for machine-dependent things. */
+
+/* Size of frame. Need to know this to emit return insns from leaf procedures.
+ ACTUAL_FSIZE is set by compute_frame_size() which is called during the
+ reload pass. This is important as the value is later used in insn
+ scheduling (to see what can go in a delay slot).
+ APPARENT_FSIZE is the size of the stack less the register save area and less
+ the outgoing argument area. It is used when saving call preserved regs. */
+static int apparent_fsize;
+static int actual_fsize;
+
+/* Save the operands last given to a compare for use when we
+ generate a scc or bcc insn. */
+
+rtx sparc_compare_op0, sparc_compare_op1;
+
+/* We may need an epilogue if we spill too many registers.
+ If this is non-zero, then we branch here for the epilogue. */
+static rtx leaf_label;
+
+#ifdef LEAF_REGISTERS
+
+/* Vector to say how input registers are mapped to output
+ registers. FRAME_POINTER_REGNUM cannot be remapped by
+ this function to eliminate it. You must use -fomit-frame-pointer
+ to get that. */
+char leaf_reg_remap[] =
+{ 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, 14, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ 8, 9, 10, 11, 12, 13, -1, 15,
+
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100};
+
+#endif
+
+/* Name of where we pretend to think the frame pointer points.
+ Normally, this is "%fp", but if we are in a leaf procedure,
+ this is "%sp+something". We record "something" separately as it may be
+ too big for reg+constant addressing. */
+
+static char *frame_base_name;
+static int frame_base_offset;
+
+static rtx pic_setup_code PROTO((void));
+static rtx find_addr_reg PROTO((rtx));
+static void sparc_init_modes PROTO((void));
+static int save_regs PROTO((FILE *, int, int, char *,
+ int, int, int));
+static int restore_regs PROTO((FILE *, int, int, char *, int, int));
+static void build_big_number PROTO((FILE *, int, char *));
+static int function_arg_slotno PROTO((const CUMULATIVE_ARGS *,
+ enum machine_mode, tree, int, int,
+ int *, int *));
+
+#ifdef DWARF2_DEBUGGING_INFO
+extern char *dwarf2out_cfi_label ();
+#endif
+
+/* Option handling. */
+
+/* Code model option as passed by user. */
+char *sparc_cmodel_string;
+/* Parsed value. */
+enum cmodel sparc_cmodel;
+
+/* Record alignment options as passed by user. */
+char *sparc_align_loops_string;
+char *sparc_align_jumps_string;
+char *sparc_align_funcs_string;
+
+/* Parsed values, as a power of two. */
+int sparc_align_loops;
+int sparc_align_jumps;
+int sparc_align_funcs;
+
+struct sparc_cpu_select sparc_select[] =
+{
+ /* switch name, tune arch */
+ { (char *)0, "default", 1, 1 },
+ { (char *)0, "-mcpu=", 1, 1 },
+ { (char *)0, "-mtune=", 1, 0 },
+ { 0, 0, 0, 0 }
+};
+
+/* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
+enum processor_type sparc_cpu;
+
+/* Validate and override various options, and do some machine dependent
+ initialization. */
+
+void
+sparc_override_options ()
+{
+ static struct code_model {
+ char *name;
+ int value;
+ } cmodels[] = {
+ { "32", CM_32 },
+ { "medlow", CM_MEDLOW },
+ { "medmid", CM_MEDMID },
+ { "medany", CM_MEDANY },
+ { "embmedany", CM_EMBMEDANY },
+ { 0, 0 }
+ };
+ struct code_model *cmodel;
+  /* Map TARGET_CPU_DEFAULT to value for -m{cpu,tune}=.  */
+ static struct cpu_default {
+ int cpu;
+ char *name;
+ } cpu_default[] = {
+ /* There must be one entry here for each TARGET_CPU value. */
+ { TARGET_CPU_sparc, "cypress" },
+ { TARGET_CPU_sparclet, "tsc701" },
+ { TARGET_CPU_sparclite, "f930" },
+ { TARGET_CPU_v8, "v8" },
+ { TARGET_CPU_supersparc, "supersparc" },
+ { TARGET_CPU_v9, "v9" },
+ { TARGET_CPU_ultrasparc, "ultrasparc" },
+ { 0, 0 }
+ };
+ struct cpu_default *def;
+ /* Table of values for -m{cpu,tune}=. */
+ static struct cpu_table {
+ char *name;
+ enum processor_type processor;
+ int disable;
+ int enable;
+ } cpu_table[] = {
+ { "v7", PROCESSOR_V7, MASK_ISA, 0 },
+ { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
+ { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
+ /* TI TMS390Z55 supersparc */
+ { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
+ { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
+ /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
+ The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
+ { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
+ { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
+ { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
+ /* TEMIC sparclet */
+ { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
+ { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
+ /* TI ultrasparc */
+ { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9 },
+ { 0, 0, 0, 0 }
+ };
+ struct cpu_table *cpu;
+ struct sparc_cpu_select *sel;
+ int fpu;
+
+#ifndef SPARC_BI_ARCH
+ /* Check for unsupported architecture size. */
+ if (! TARGET_64BIT != DEFAULT_ARCH32_P)
+ {
+ error ("%s is not supported by this configuration",
+ DEFAULT_ARCH32_P ? "-m64" : "-m32");
+ }
+#endif
+
+ /* Code model selection. */
+ sparc_cmodel = SPARC_DEFAULT_CMODEL;
+ if (sparc_cmodel_string != NULL)
+ {
+ if (TARGET_ARCH64)
+ {
+ for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
+ if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
+ break;
+ if (cmodel->name == NULL)
+ error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
+ else
+ sparc_cmodel = cmodel->value;
+ }
+ else
+ error ("-mcmodel= is not supported on 32 bit systems");
+ }
+
+ fpu = TARGET_FPU; /* save current -mfpu status */
+
+ /* Set the default CPU. */
+ for (def = &cpu_default[0]; def->name; ++def)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+ if (! def->name)
+ abort ();
+ sparc_select[0].string = def->name;
+
+ for (sel = &sparc_select[0]; sel->name; ++sel)
+ {
+ if (sel->string)
+ {
+ for (cpu = &cpu_table[0]; cpu->name; ++cpu)
+ if (! strcmp (sel->string, cpu->name))
+ {
+ if (sel->set_tune_p)
+ sparc_cpu = cpu->processor;
+
+ if (sel->set_arch_p)
+ {
+ target_flags &= ~cpu->disable;
+ target_flags |= cpu->enable;
+ }
+ break;
+ }
+
+ if (! cpu->name)
+ error ("bad value (%s) for %s switch", sel->string, sel->name);
+ }
+ }
+
+ /* If -mfpu or -mno-fpu was explicitly used, don't override with
+ the processor default. */
+ if (TARGET_FPU_SET)
+ target_flags = (target_flags & ~MASK_FPU) | fpu;
+
+ /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
+ if (TARGET_V9 && TARGET_ARCH32)
+ target_flags |= MASK_DEPRECATED_V8_INSNS;
+
+ /* V8PLUS requires V9 */
+ if (! TARGET_V9)
+ target_flags &= ~MASK_V8PLUS;
+
+ /* Don't use stack biasing in 32 bit mode. */
+ if (TARGET_ARCH32)
+ target_flags &= ~MASK_STACK_BIAS;
+
+ /* Validate -malign-loops= value, or provide default. */
+ if (sparc_align_loops_string)
+ {
+ sparc_align_loops = exact_log2 (atoi (sparc_align_loops_string));
+ if (sparc_align_loops < 2 || sparc_align_loops > 7)
+ fatal ("-malign-loops=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_loops_string);
+ }
+ else
+ {
+ /* ??? This relies on ASM_OUTPUT_ALIGN to not emit the alignment if
+         it's 0.  This sounds a bit kludgey.  */
+ sparc_align_loops = 0;
+ }
+
+ /* Validate -malign-jumps= value, or provide default. */
+ if (sparc_align_jumps_string)
+ {
+ sparc_align_jumps = exact_log2 (atoi (sparc_align_jumps_string));
+      if (sparc_align_jumps < 2 || sparc_align_jumps > 7)
+ fatal ("-malign-jumps=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_jumps_string);
+ }
+ else
+ {
+ /* ??? This relies on ASM_OUTPUT_ALIGN to not emit the alignment if
+         it's 0.  This sounds a bit kludgey.  */
+ sparc_align_jumps = 0;
+ }
+
+ /* Validate -malign-functions= value, or provide default. */
+ if (sparc_align_funcs_string)
+ {
+ sparc_align_funcs = exact_log2 (atoi (sparc_align_funcs_string));
+      if (sparc_align_funcs < 2 || sparc_align_funcs > 7)
+ fatal ("-malign-functions=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_funcs_string);
+ }
+ else
+ sparc_align_funcs = DEFAULT_SPARC_ALIGN_FUNCS;
+
+ /* Validate PCC_STRUCT_RETURN. */
+ if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
+ flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
+
+ /* Do various machine dependent initializations. */
+ sparc_init_modes ();
+}
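+
+/* The three -malign-* checks above share one pattern: exact_log2 of the
+   argument must land between 2 and 7, i.e. the requested alignment must be
+   a power of two from 4 to 128.  A standalone sketch of that validation;
+   exact_log2_sketch stands in for GCC's exact_log2, which returns -1 for
+   values that are not powers of two.  */
+
+static int
+exact_log2_sketch (unsigned int n)
+{
+  int log = 0;
+
+  if (n == 0 || (n & (n - 1)) != 0)
+    return -1;                    /* not a power of two */
+  while ((n >>= 1) != 0)
+    log++;
+  return log;
+}
+
+static int
+valid_alignment_p_sketch (unsigned int n)
+{
+  int log = exact_log2_sketch (n);
+
+  return log >= 2 && log <= 7;    /* i.e. 4 <= n <= 128 */
+}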
+
+/* Miscellaneous utilities. */
+
+/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
+ or branch on register contents instructions. */
+
+int
+v9_regcmp_p (code)
+ enum rtx_code code;
+{
+ return (code == EQ || code == NE || code == GE || code == LT
+ || code == LE || code == GT);
+}
+
+/* 32 bit registers are zero extended so only zero/non-zero comparisons
+ work. */
+int
+v8plus_regcmp_p (code)
+ enum rtx_code code;
+{
+ return (code == EQ || code == NE);
+}
+
+/* Operand constraints. */
+
+/* Return non-zero only if OP is a register of mode MODE,
+ or const0_rtx. Don't allow const0_rtx if TARGET_LIVE_G0 because
+ %g0 may contain anything. */
+
+int
+reg_or_0_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+ if (TARGET_LIVE_G0)
+ return 0;
+ if (op == const0_rtx)
+ return 1;
+ if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (op) == 0
+ && CONST_DOUBLE_LOW (op) == 0)
+ return 1;
+ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
+ && GET_CODE (op) == CONST_DOUBLE
+ && fp_zero_operand (op))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if OP is a floating point value with value 0.0. */
+
+int
+fp_zero_operand (op)
+ rtx op;
+{
+ REAL_VALUE_TYPE r;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ return (REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r));
+}
+
+/* Nonzero if OP is an integer register. */
+
+int
+intreg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (register_operand (op, SImode)
+ || (TARGET_ARCH64 && register_operand (op, DImode)));
+}
+
+/* Nonzero if OP is a floating point condition code register. */
+
+int
+fcc_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* This can happen when recog is called from combine. Op may be a MEM.
+ Fail instead of calling abort in this case. */
+ if (GET_CODE (op) != REG)
+ return 0;
+
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+ if (mode == VOIDmode
+ && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
+ return 0;
+
+#if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first. See gen_compare_reg(). */
+ if (reg_renumber == 0)
+ return REGNO (op) >= FIRST_PSEUDO_REGISTER;
+ return REGNO_OK_FOR_CCFP_P (REGNO (op));
+#else
+ return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
+#endif
+}
+
+/* Nonzero if OP is an integer or floating point condition code register. */
+
+int
+icc_or_fcc_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
+ {
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+ if (mode == VOIDmode
+ && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
+ return 0;
+ return 1;
+ }
+
+ return fcc_reg_operand (op, mode);
+}
+
+/* Nonzero if OP can appear as the dest of a RESTORE insn. */
+int
+restore_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == REG && GET_MODE (op) == mode
+ && (REGNO (op) < 8 || (REGNO (op) >= 24 && REGNO (op) < 32)));
+}
+
+/* Call insn on SPARC can take a PC-relative constant address, or any regular
+ memory address. */
+
+int
+call_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ abort ();
+ op = XEXP (op, 0);
+ return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
+}
+
+int
+call_operand_address (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
+}
+
+/* Returns 1 if OP is either a symbol reference or a sum of a symbol
+ reference and a constant. */
+
+int
+symbolic_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case CONST:
+ op = XEXP (op, 0);
+ return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+
+ /* ??? This clause seems to be irrelevant. */
+ case CONST_DOUBLE:
+ return GET_MODE (op) == mode;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return truth value of statement that OP is a symbolic memory
+ operand of mode MODE. */
+
+int
+symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
+ || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
+}
+
+/* Return truth value of statement that OP is a LABEL_REF of mode MODE. */
+
+int
+label_ref_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != LABEL_REF)
+ return 0;
+ if (GET_MODE (op) != mode)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if the operand is an argument used in generating pic references
+ in either the medium/low or medium/anywhere code models of sparc64. */
+
+int
+sp64_medium_pic_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ /* Check for (const (minus (symbol_ref:GOT)
+ (const (minus (label) (pc))))). */
+ if (GET_CODE (op) != CONST)
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != MINUS)
+ return 0;
+ if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
+ return 0;
+ /* ??? Ensure symbol is GOT. */
+ if (GET_CODE (XEXP (op, 1)) != CONST)
+ return 0;
+ if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if the operand is a data segment reference. This includes
+ the readonly data segment, or in other words anything but the text segment.
+ This is needed in the medium/anywhere code model on v9. These values
+ are accessed with EMBMEDANY_BASE_REG. */
+
+int
+data_segment_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF :
+ return ! SYMBOL_REF_FLAG (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+      return data_segment_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return 1 if the operand is a text segment reference.
+ This is needed in the medium/anywhere code model on v9. */
+
+int
+text_segment_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF :
+ return 1;
+ case SYMBOL_REF :
+ return SYMBOL_REF_FLAG (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+      return text_segment_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return 1 if the operand is either a register or a memory operand that is
+ not symbolic. */
+
+int
+reg_or_nonsymb_mem_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
+ return 1;
+
+ return 0;
+}
+
+int
+sparc_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) == CONST_INT)
+ return SMALL_INT (op);
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == LO_SUM)
+ return (GET_CODE (XEXP (op, 0)) == REG
+ && symbolic_operand (XEXP (op, 1), Pmode));
+ return memory_address_p (mode, op);
+}
+
+int
+move_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mode == DImode && arith_double_operand (op, mode))
+ return 1;
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) == CONST_INT)
+ return SMALL_INT (op) || SPARC_SETHI_P (INTVAL (op));
+
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == LO_SUM)
+ return (register_operand (XEXP (op, 0), Pmode)
+ && CONSTANT_P (XEXP (op, 1)));
+ return memory_address_p (mode, op);
+}
+
+int
+splittable_symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+ if (! symbolic_operand (XEXP (op, 0), Pmode))
+ return 0;
+ return 1;
+}
+
+int
+splittable_immediate_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+ if (! immediate_operand (XEXP (op, 0), Pmode))
+ return 0;
+ return 1;
+}
+
+/* Return truth value of whether OP is EQ or NE. */
+
+int
+eq_or_neq (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
+}
+
+/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
+ or LTU for non-floating-point. We handle those specially. */
+
+int
+normal_comp_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CCFPmode
+ || GET_MODE (XEXP (op, 0)) == CCFPEmode)
+ return 1;
+
+ return (code != NE && code != EQ && code != GEU && code != LTU);
+}
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+noov_compare_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode)
+ /* These are the only branches which work with CC_NOOVmode. */
+ return (code == EQ || code == NE || code == GE || code == LT);
+ return 1;
+}
+
+/* Nonzero if OP is a comparison operator suitable for use in v9
+ conditional move or branch on register contents instructions. */
+
+int
+v9_regcmp_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ return v9_regcmp_p (code);
+}
+
+/* ??? Same as eq_or_neq. */
+int
+v8plus_regcmp_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return (code == EQ || code == NE);
+}
+
+/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation. */
+
+int
+extend_op (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can set
+ the condition codes explicitly. We do not include PLUS and MINUS
+ because these require CC_NOOVmode, which we handle explicitly. */
+
+int
+cc_arithop (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == AND
+ || GET_CODE (op) == IOR
+ || GET_CODE (op) == XOR)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can bitwise
+ complement its second operand and set the condition codes explicitly. */
+
+int
+cc_arithopn (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ /* XOR is not here because combine canonicalizes (xor (not ...) ...)
+ and (xor ... (not ...)) to (not (xor ...)). */
+ return (GET_CODE (op) == AND
+ || GET_CODE (op) == IOR);
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 13 bit immediate field. This is an acceptable SImode operand for
+ most 3 address instructions. */
+
+int
+arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int val;
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ val = INTVAL (op) & 0xffffffff;
+ return SPARC_SIMM13_P (val);
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 11 bit immediate field. This is an acceptable SImode operand for
+ the movcc instructions. */
+
+int
+arith11_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 10 bit immediate field. This is an acceptable SImode operand for
+ the movrcc instructions. */
+
+int
+arith10_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
+}
+
+/* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
+ immediate field, or is a CONST_DOUBLE whose both parts fit in a 13 bit
+ immediate field.
+ v9: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+ can fit in a 13 bit immediate field. This is an acceptable DImode operand
+ for most 3 address instructions. */
+
+int
+arith_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
+ || (! TARGET_ARCH64
+ && GET_CODE (op) == CONST_DOUBLE
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
+ || (TARGET_ARCH64
+ && GET_CODE (op) == CONST_DOUBLE
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
+}
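+
+/* A note on the range checks above: adding the bias 0x1000 maps the
+ signed 13 bit range [-0x1000, 0xfff] onto [0, 0x1fff], so a single
+ unsigned comparison against 0x2000 covers the whole range. For
+ example, a low word of -0x1000 becomes 0 (accepted) while 0x1000
+ becomes 0x2000 (rejected). The 11 and 10 bit predicates below use
+ the same idiom with biases of 0x400 and 0x200. */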
+
+/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+ can fit in an 11 bit immediate field. This is an acceptable DImode
+ operand for the movcc instructions. */
+/* ??? Replace with arith11_operand? */
+
+int
+arith11_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_DOUBLE
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
+ || (GET_CODE (op) == CONST_INT
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
+}
+
+/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+ can fit in a 10 bit immediate field. This is an acceptable DImode
+ operand for the movrcc instructions. */
+/* ??? Replace with arith10_operand? */
+
+int
+arith10_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_DOUBLE
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
+ || (GET_CODE (op) == CONST_INT
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
+}
+
+/* Return truth value of whether OP is an integer which fits the
+ range constraining immediate operands in most three-address insns,
+ which have a 13 bit immediate field. */
+
+int
+small_int (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+}
+
+/* Recognize operand values for the umul instruction. That instruction sign
+ extends immediate values just like all other sparc instructions, but
+ interprets the extended result as an unsigned number. */
+
+int
+uns_small_int (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* All allowed constants will fit a CONST_INT. */
+ return ((GET_CODE (op) == CONST_INT
+ && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
+ || (INTVAL (op) >= 0xFFFFF000 && INTVAL (op) < 0x100000000L)))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+#else
+ return (((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
+ || (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (op) == 0
+ && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+#endif
+}
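+
+/* For example, the 13 bit immediate value -1 is sign extended to
+ 0xFFFFFFFF, which umul then treats as 4294967295; so the constants
+ reachable through an immediate are 0..0xFFF and
+ 0xFFFFF000..0xFFFFFFFF, which is exactly what uns_small_int
+ accepts. */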
+
+int
+uns_arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return register_operand (op, mode) || uns_small_int (op, mode);
+}
+
+/* Return truth value of statement that OP is a call-clobbered register. */
+int
+clobbered_register (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for the cc reg in the proper mode. */
+
+rtx
+gen_compare_reg (code, x, y)
+ enum rtx_code code;
+ rtx x, y;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg;
+
+ /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
+ fcc regs (cse can't tell they're really call clobbered regs and will
+ remove a duplicate comparison even if there is an intervening function
+ call - it will then try to reload the cc reg via an int reg which is why
+ we need the movcc patterns). It is possible to provide the movcc
+ patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
+ registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
+ to tell cse that CCFPE mode registers (even pseudos) are call
+ clobbered. */
+
+ /* ??? This is an experiment. Rather than making changes to cse which may
+ or may not be easy/clean, we do our own cse. This is possible because
+ we will generate hard registers. Cse knows they're call clobbered (it
+ doesn't know the same thing about pseudos). If we guess wrong, no big
+ deal, but if we win, great! */
+
+ if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+#if 1 /* experiment */
+ {
+ int reg;
+ /* We cycle through the registers to ensure they're all exercised. */
+ static int next_fcc_reg = 0;
+ /* Previous x,y for each fcc reg. */
+ static rtx prev_args[4][2];
+
+ /* Scan prev_args for x,y. */
+ for (reg = 0; reg < 4; reg++)
+ if (prev_args[reg][0] == x && prev_args[reg][1] == y)
+ break;
+ if (reg == 4)
+ {
+ reg = next_fcc_reg;
+ prev_args[reg][0] = x;
+ prev_args[reg][1] = y;
+ next_fcc_reg = (next_fcc_reg + 1) & 3;
+ }
+ cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
+ }
+#else
+ cc_reg = gen_reg_rtx (mode);
+#endif /* ! experiment */
+ else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
+ else
+ cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
+
+ if (TARGET_V8PLUS && mode == CCXmode)
+ {
+ emit_insn (gen_cmpdi_v8plus (x, y));
+ }
+ else
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
+ gen_rtx_COMPARE (mode, x, y)));
+ }
+
+ return cc_reg;
+}
+
+/* This function is used for v9 only.
+ CODE is the code for an Scc's comparison.
+ OPERANDS[0] is the target of the Scc insn.
+ OPERANDS[1] is the value we compare against const0_rtx (which hasn't
+ been generated yet).
+
+ This function is needed to turn
+
+ (set (reg:SI 110)
+ (gt (reg:CCX 100 %icc)
+ (const_int 0)))
+ into
+ (set (reg:SI 110)
+ (gt:DI (reg:CCX 100 %icc)
+ (const_int 0)))
+
+ I.e., the instruction recognizer needs to see the mode of the comparison to
+ find the right instruction. We could use "gt:DI" right in the
+ define_expand, but leaving it out allows us to handle DI, SI, etc.
+
+ We refer to the global sparc compare operands sparc_compare_op0 and
+ sparc_compare_op1. */
+
+int
+gen_v9_scc (compare_code, operands)
+ enum rtx_code compare_code;
+ register rtx *operands;
+{
+ rtx temp, op0, op1;
+
+ if (! TARGET_ARCH64
+ && (GET_MODE (sparc_compare_op0) == DImode
+ || GET_MODE (operands[0]) == DImode))
+ return 0;
+
+ /* Handle the case where operands[0] == sparc_compare_op0.
+ We "early clobber" the result. */
+ if (REGNO (operands[0]) == REGNO (sparc_compare_op0))
+ {
+ op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
+ emit_move_insn (op0, sparc_compare_op0);
+ }
+ else
+ op0 = sparc_compare_op0;
+ /* For consistency in the following. */
+ op1 = sparc_compare_op1;
+
+ /* Try to use the movrCC insns. */
+ if (TARGET_ARCH64
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && op1 == const0_rtx
+ && v9_regcmp_p (compare_code))
+ {
+ /* Special case for op0 != 0. This can be done with one instruction if
+ operands[0] == sparc_compare_op0. We don't assume they are equal
+ now though. */
+
+ if (compare_code == NE
+ && GET_MODE (operands[0]) == DImode
+ && GET_MODE (op0) == DImode)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], op0));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (DImode,
+ gen_rtx_fmt_ee (compare_code, DImode,
+ op0, const0_rtx),
+ const1_rtx,
+ operands[0])));
+ return 1;
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
+ if (GET_MODE (op0) != DImode)
+ {
+ temp = gen_reg_rtx (DImode);
+ convert_move (temp, op0, 0);
+ }
+ else
+ temp = op0;
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_fmt_ee (compare_code, DImode,
+ temp, const0_rtx),
+ const1_rtx,
+ operands[0])));
+ return 1;
+ }
+ else
+ {
+ operands[1] = gen_compare_reg (compare_code, op0, op1);
+
+ switch (GET_MODE (operands[1]))
+ {
+ case CCmode :
+ case CCXmode :
+ case CCFPEmode :
+ case CCFPmode :
+ break;
+ default :
+ abort ();
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_fmt_ee (compare_code,
+ GET_MODE (operands[1]),
+ operands[1], const0_rtx),
+ const1_rtx, operands[0])));
+ return 1;
+ }
+}
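+
+/* As a rough illustration (the exact templates live in sparc.md), the
+ movrCC path above turns "dst = (x != 0)" for a DImode X into
+ something like
+
+ mov %x,%dst
+ movrnz %x,1,%dst
+
+ i.e. a v9 branch-on-register-contents conditional move rather than a
+ compare-and-branch sequence. */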
+
+/* Emit a conditional jump insn for the v9 architecture using comparison code
+ CODE and jump target LABEL.
+ This function exists to take advantage of the v9 brxx insns. */
+
+void
+emit_v9_brxx_insn (code, op0, label)
+ enum rtx_code code;
+ rtx op0, label;
+{
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_fmt_ee (code, GET_MODE (op0),
+ op0, const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx)));
+}
+
+/* Return nonzero if a return peephole that merges the return with the
+ setting of the output register is ok. */
+int
+leaf_return_peephole_ok ()
+{
+ return (actual_fsize == 0);
+}
+
+/* Return nonzero if TRIAL can go into the function epilogue's
+ delay slot. SLOT is the slot we are trying to fill. */
+
+int
+eligible_for_epilogue_delay (trial, slot)
+ rtx trial;
+ int slot;
+{
+ rtx pat, src;
+
+ if (slot >= 1)
+ return 0;
+
+ if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
+ return 0;
+
+ if (get_attr_length (trial) != 1)
+ return 0;
+
+ /* If %g0 is live, there are lots of things we can't handle.
+ Rather than trying to find them all now, let's punt and only
+ optimize things as necessary. */
+ if (TARGET_LIVE_G0)
+ return 0;
+
+ /* In the case of a true leaf function, anything can go into the delay slot.
+ However, a delay slot only exists if the frame size is zero; otherwise
+ we will put an insn to adjust the stack after the return. */
+ if (leaf_function)
+ {
+ if (leaf_return_peephole_ok ())
+ return ((get_attr_in_uncond_branch_delay (trial)
+ == IN_BRANCH_DELAY_TRUE));
+ return 0;
+ }
+
+ /* If only trivial `restore' insns work, nothing can go in the
+ delay slot. */
+ else if (TARGET_BROKEN_SAVERESTORE)
+ return 0;
+
+ pat = PATTERN (trial);
+
+ /* Otherwise, only operations which can be done in tandem with
+ a `restore' insn can go into the delay slot. */
+ if (GET_CODE (SET_DEST (pat)) != REG
+ || REGNO (SET_DEST (pat)) >= 32
+ || REGNO (SET_DEST (pat)) < 24)
+ return 0;
+
+ /* The set of insns matched here must agree precisely with the set of
+ patterns paired with a RETURN in sparc.md. */
+
+ src = SET_SRC (pat);
+
+ /* This matches "*return_[qhs]i". */
+ if (arith_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
+
+ /* This matches "*return_di". */
+ else if (arith_double_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
+
+ /* This matches "*return_sf_no_fpu". */
+ else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode)
+ && register_operand (src, SFmode))
+ return 1;
+
+ /* This matches "*return_addsi". */
+ else if (GET_CODE (src) == PLUS
+ && arith_operand (XEXP (src, 0), SImode)
+ && arith_operand (XEXP (src, 1), SImode)
+ && (register_operand (XEXP (src, 0), SImode)
+ || register_operand (XEXP (src, 1), SImode)))
+ return 1;
+
+ /* This matches "*return_adddi". */
+ else if (GET_CODE (src) == PLUS
+ && arith_double_operand (XEXP (src, 0), DImode)
+ && arith_double_operand (XEXP (src, 1), DImode)
+ && (register_operand (XEXP (src, 0), DImode)
+ || register_operand (XEXP (src, 1), DImode)))
+ return 1;
+
+ return 0;
+}
+
+static int
+check_return_regs (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ return IN_OR_GLOBAL_P (x);
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case SET:
+ case IOR:
+ case AND:
+ case XOR:
+ case PLUS:
+ case MINUS:
+ if (check_return_regs (XEXP (x, 1)) == 0)
+ return 0;
+ case NOT:
+ case NEG:
+ case MEM:
+ return check_return_regs (XEXP (x, 0));
+
+ default:
+ return 0;
+ }
+
+}
+
+/* Return 1 if TRIAL references only in and global registers. */
+int
+eligible_for_return_delay (trial)
+ rtx trial;
+{
+ if (GET_CODE (PATTERN (trial)) != SET)
+ return 0;
+
+ return check_return_regs (PATTERN (trial));
+}
+
+int
+short_branch (uid1, uid2)
+ int uid1, uid2;
+{
+ unsigned int delta = insn_addresses[uid1] - insn_addresses[uid2];
+ if (delta + 1024 < 2048)
+ return 1;
+ /* warning ("long branch, distance %d", delta); */
+ return 0;
+}
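+
+/* The unsigned "delta + 1024 < 2048" test above accepts any address
+ difference in the range [-1024, 1023], relying on unsigned
+ wraparound for negative deltas; it is the same biasing idiom used by
+ the immediate range checks earlier in this file. */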
+
+/* Return non-zero if REG is not used after INSN.
+ We assume REG is a reload reg, and therefore does
+ not live past labels or calls or jumps. */
+int
+reg_unused_after (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ enum rtx_code code, prev_code = UNKNOWN;
+
+ while ((insn = NEXT_INSN (insn)))
+ {
+ if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
+ return 1;
+
+ code = GET_CODE (insn);
+ if (GET_CODE (insn) == CODE_LABEL)
+ return 1;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set = single_set (insn);
+ int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
+ if (set && in_src)
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return 1;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
+ }
+ prev_code = code;
+ }
+ return 1;
+}
+
+/* The table we use to reference PIC data. */
+static rtx global_offset_table;
+
+/* The function we use to get at it. */
+static rtx get_pc_symbol;
+static char get_pc_symbol_name[256];
+
+/* Ensure that we are not using patterns that are not OK with PIC. */
+
+int
+check_pic (i)
+ int i;
+{
+ switch (flag_pic)
+ {
+ case 1:
+ if (GET_CODE (recog_operand[i]) == SYMBOL_REF
+ || (GET_CODE (recog_operand[i]) == CONST
+ && ! (GET_CODE (XEXP (recog_operand[i], 0)) == MINUS
+ && (XEXP (XEXP (recog_operand[i], 0), 0)
+ == global_offset_table)
+ && (GET_CODE (XEXP (XEXP (recog_operand[i], 0), 1))
+ == CONST))))
+ abort ();
+ case 2:
+ default:
+ return 1;
+ }
+}
+
+/* Return true if X is an address which needs a temporary register when
+ reloaded while generating PIC code. */
+
+int
+pic_address_needs_scratch (x)
+ rtx x;
+{
+ /* An address which is a symbolic operand plus a constant that is not
+ a SMALL_INT needs a temp reg. */
+ if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
+ return 1;
+
+ return 0;
+}
+
+/* Legitimize PIC addresses. If the address is already position-independent,
+ we return ORIG. Newly generated position-independent addresses go into a
+ reg. This is REG if non zero, otherwise we allocate register(s) as
+ necessary. */
+
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (flag_pic == 2)
+ {
+ /* If not during reload, allocate another temp reg here for loading
+ in the address, so that these instructions can be optimized
+ properly. */
+ rtx temp_reg = ((reload_in_progress || reload_completed)
+ ? reg : gen_reg_rtx (Pmode));
+
+ /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
+ won't get confused into thinking that these two instructions
+ are loading in the true address of the symbol. If in the
+ future a PIC rtx exists, that should be used instead. */
+ emit_insn (gen_pic_sethi_si (temp_reg, orig));
+ emit_insn (gen_pic_lo_sum_si (temp_reg, temp_reg, orig));
+
+ address = temp_reg;
+ }
+ else
+ address = orig;
+
+ pic_ref = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx, address));
+ current_function_uses_pic_offset_table = 1;
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if (SMALL_INT (offset))
+ return plus_constant_for_output (base, INTVAL (offset));
+ else if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ /* If we reach here, then something is seriously wrong. */
+ abort ();
+ }
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ return orig;
+}
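+
+/* Schematically, and ignoring relocation details, the code above loads
+ the address of a symbol with something like
+
+ sethi %hi(sym),%tmp ! flag_pic == 2 only
+ or %tmp,%lo(sym),%tmp ! flag_pic == 2 only
+ ld [%l7+%tmp],%reg
+
+ while for flag_pic == 1 the GOT offset is folded directly into the
+ load as ld [%l7+sym],%reg. In both cases %l7 is the PIC register set
+ up by finalize_pic below. */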
+
+/* Set up PIC-specific rtl. This should not cause any insns
+ to be emitted. */
+
+void
+initialize_pic ()
+{
+}
+
+/* Return the RTX for insns to set the PIC register. */
+
+static rtx
+pic_setup_code ()
+{
+ rtx seq;
+
+ start_sequence ();
+ emit_insn (gen_get_pc (pic_offset_table_rtx, global_offset_table,
+ get_pc_symbol));
+ seq = gen_sequence ();
+ end_sequence ();
+
+ return seq;
+}
+
+/* Emit special PIC prologues and epilogues. */
+
+void
+finalize_pic ()
+{
+ /* Labels to get the PC in the prologue of this function. */
+ int orig_flag_pic = flag_pic;
+ rtx insn;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+ /* If we haven't emitted the special get_pc helper function, do so now. */
+ if (get_pc_symbol_name[0] == 0)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (get_pc_symbol_name, "LGETPC", 0);
+
+ text_section ();
+ ASM_OUTPUT_ALIGN (asm_out_file, 3);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LGETPC", 0);
+ fputs ("\tretl\n\tadd %o7,%l7,%l7\n", asm_out_file);
+ }
+
+ /* Initialize every time through, since we can't easily
+ know this to be permanent. */
+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ get_pc_symbol = gen_rtx_SYMBOL_REF (Pmode, get_pc_symbol_name);
+ flag_pic = 0;
+
+ emit_insn_after (pic_setup_code (), get_insns ());
+
+ /* Insert the code in each nonlocal goto receiver. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
+ && XINT (PATTERN (insn), 1) == 4)
+ emit_insn_after (pic_setup_code (), insn);
+
+ flag_pic = orig_flag_pic;
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up.
+ ??? In the case where we don't obey regdecls, this is not sufficient
+ since we may not fall out the bottom. */
+ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+}
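+
+/* The LGETPC helper emitted above cooperates with the get_pc pattern
+ in sparc.md: the caller materializes the %hi/%lo parts of
+ _GLOBAL_OFFSET_TABLE_ relative to the call site into %l7 and then
+ calls LGETPC, whose "retl; add %o7,%l7,%l7" adds in the return
+ address (the address of the call), leaving %l7 pointing at the
+ GOT. */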
+
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally. */
+
+int
+emit_move_sequence (operands, mode)
+ rtx *operands;
+ enum machine_mode mode;
+{
+ register rtx operand0 = operands[0];
+ register rtx operand1 = operands[1];
+
+ if (CONSTANT_P (operand1) && flag_pic
+ && pic_address_needs_scratch (operand1))
+ operands[1] = operand1 = legitimize_pic_address (operand1, mode, 0);
+
+ /* Handle most common case first: storing into a register. */
+ if (register_operand (operand0, mode))
+ {
+ /* Integer constant to FP register. */
+ if (GET_CODE (operand0) == REG
+ && REGNO (operand0) >= 32
+ && REGNO (operand0) < FIRST_PSEUDO_REGISTER
+ && CONSTANT_P (operand1))
+ {
+ operand1 = validize_mem (force_const_mem (GET_MODE (operand0), operand1));
+ }
+
+ if (register_operand (operand1, mode)
+ || (GET_CODE (operand1) == CONST_INT && SMALL_INT (operand1))
+ || (GET_CODE (operand1) == CONST_DOUBLE
+ && arith_double_operand (operand1, DImode))
+ || (GET_CODE (operand1) == HIGH && GET_MODE (operand1) != DImode)
+ /* Only `general_operands' can come here, so MEM is ok. */
+ || GET_CODE (operand1) == MEM)
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ }
+ else if (GET_CODE (operand0) == MEM)
+ {
+ if (register_operand (operand1, mode)
+ || (operand1 == const0_rtx && ! TARGET_LIVE_G0))
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operand0);
+ operands[1] = operand1 = force_reg (mode, operand1);
+ }
+ }
+
+ /* DImode HIGH values in sparc64 need a clobber added. */
+ if (TARGET_ARCH64
+ && GET_CODE (operand1) == HIGH && GET_MODE (operand1) == DImode)
+ {
+ emit_insn (gen_sethi_di_sp64 (operand0, XEXP (operand1, 0)));
+ return 1;
+ }
+ /* Simplify the source if we need to. */
+ else if (GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
+ {
+ if (flag_pic && symbolic_operand (operand1, mode))
+ {
+ rtx temp_reg = reload_in_progress ? operand0 : 0;
+
+ operands[1] = legitimize_pic_address (operand1, mode, temp_reg);
+ }
+ else if (GET_CODE (operand1) == CONST_INT
+ ? (! SMALL_INT (operand1)
+ && INTVAL (operand1) != -4096
+ && ! SPARC_SETHI_P (INTVAL (operand1)))
+ : GET_CODE (operand1) == CONST_DOUBLE
+ ? ! arith_double_operand (operand1, DImode)
+ : 1)
+ {
+ /* For DImode values, temp must be operand0 because of the way
+ HI and LO_SUM work. The LO_SUM operator only copies half of
+ the LSW from the dest of the HI operator. If the LO_SUM dest is
+ not the same as the HI dest, then the MSW of the LO_SUM dest will
+ never be set.
+
+ ??? The real problem here is that the ...(HI:DImode pattern emits
+ multiple instructions, and the ...(LO_SUM:DImode pattern emits
+ one instruction. This fails, because the compiler assumes that
+ LO_SUM copies all bits of the first operand to its dest. Better
+ would be to have the HI pattern emit one instruction and the
+ LO_SUM pattern multiple instructions. Even better would be
+ to use four rtl insns. */
+ rtx temp = ((reload_in_progress || mode == DImode)
+ ? operand0 : gen_reg_rtx (mode));
+
+ if (mode == SImode)
+ {
+ if (GET_CODE (operand1) == CONST_INT)
+ operand1 = GEN_INT (INTVAL (operand1) & 0xffffffff);
+ else if (GET_CODE (operand1) == CONST_DOUBLE)
+ operand1 = GEN_INT (CONST_DOUBLE_LOW (operand1) & 0xffffffff);
+ }
+
+ if (TARGET_ARCH64 && mode == DImode)
+ emit_insn (gen_sethi_di_sp64 (temp, operand1));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_HIGH (mode, operand1)));
+
+ operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
+ }
+ }
+
+ /* Now have insn-emit do whatever it normally does. */
+ return 0;
+}
+
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as a 4 byte quantity.
+
+ This isn't intended to be very smart. It is up to the caller to
+ choose the best way to do things.
+
+ Note that OPERANDS may be modified to suit the returned string. */
+
+char *
+singlemove_string (operands)
+ rtx *operands;
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (GET_CODE (operands[1]) != MEM)
+ return "st %r1,%0";
+ else
+ abort ();
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return "ld %1,%0";
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r;
+ long i;
+
+ /* Must be SFmode, otherwise this doesn't make sense. */
+ if (GET_MODE (operands[1]) != SFmode)
+ abort ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+
+ if (CONST_OK_FOR_LETTER_P (i, 'I'))
+ return "mov %1,%0";
+ else if ((i & 0x000003FF) != 0)
+ return "sethi %%hi(%a1),%0\n\tor %0,%%lo(%a1),%0";
+ else
+ return "sethi %%hi(%a1),%0";
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* Only consider the low 32 bits of the constant. */
+ int i = INTVAL (operands[1]) & 0xffffffff;
+
+ if (SPARC_SIMM13_P (i))
+ return "mov %1,%0";
+
+ if (i == 4096)
+ return "sub %%g0,-4096,%0";
+
+ /* If all low order 10 bits are clear, then we only need a single
+ sethi insn to load the constant. */
+ /* FIXME: Use SETHI_P. */
+ if ((i & 0x000003FF) != 0)
+ return "sethi %%hi(%a1),%0\n\tor %0,%%lo(%a1),%0";
+ else
+ return "sethi %%hi(%a1),%0";
+ }
+ /* Operand 1 must be a register, or an 'I' type CONST_INT. */
+ return "mov %1,%0";
+}
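+
+/* As a concrete example of the sethi/or split used above, loading the
+ constant 0x12345678 becomes
+
+ sethi %hi(0x12345678),%reg
+ or %reg,%lo(0x12345678),%reg
+
+ where %hi() supplies the upper 22 bits and %lo() the low 10 bits;
+ when the low 10 bits are all zero the or is omitted. */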
+
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as an 8 byte quantity.
+
+ This isn't intended to be very smart. It is up to the caller to
+ choose the best way to do things.
+
+ Note that OPERANDS may be modified to suit the returned string. */
+
+char *
+doublemove_string (operands)
+ rtx *operands;
+{
+ rtx op0 = operands[0], op1 = operands[1];
+
+ if (GET_CODE (op0) == MEM)
+ {
+ if (GET_CODE (op1) == REG)
+ {
+ if (FP_REG_P (op1))
+ return "std %1,%0";
+ return TARGET_ARCH64 ? "stx %1,%0" : "std %1,%0";
+ }
+ if (TARGET_ARCH64
+ && (op1 == const0_rtx
+ || (GET_MODE (op1) != VOIDmode
+ && op1 == CONST0_RTX (GET_MODE (op1)))))
+ return "stx %r1,%0";
+ abort ();
+ }
+ else if (GET_CODE (op1) == MEM)
+ {
+ if (GET_CODE (op0) != REG)
+ abort ();
+ if (FP_REG_P (op0))
+ return "ldd %1,%0";
+ return TARGET_ARCH64 ? "ldx %1,%0" : "ldd %1,%0";
+ }
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ /* ??? Unfinished, and maybe not needed. */
+ abort ();
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* ??? Unfinished, and maybe not needed. */
+ abort ();
+ }
+ /* Operand 1 must be a register, or an 'I' type CONST_INT. */
+ return "mov %1,%0";
+}
+
+/* Return non-zero if it is OK to assume that the given memory operand is
+ aligned at least to an 8-byte boundary. This should only be called
+ for memory accesses whose size is 8 bytes or larger. */
+
+int
+mem_aligned_8 (mem)
+ register rtx mem;
+{
+ register rtx addr;
+ register rtx base;
+ register rtx offset;
+
+ if (GET_CODE (mem) != MEM)
+ return 0; /* It's gotta be a MEM! */
+
+ addr = XEXP (mem, 0);
+
+ /* Now that all misaligned double parms are copied on function entry,
+ we can assume any 64-bit object is 64-bit aligned except those which
+ are at unaligned offsets from the stack or frame pointer. If the
+ TARGET_UNALIGNED_DOUBLES switch is given, we do not make this
+ assumption. */
+
+ /* See what register we use in the address. */
+ base = offset = 0;
+ if (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ base = XEXP (addr, 0);
+ offset = XEXP (addr, 1);
+ }
+ }
+ else if (GET_CODE (addr) == REG)
+ {
+ base = addr;
+ offset = const0_rtx;
+ }
+
+ /* If it's the stack or frame pointer, check offset alignment.
+ We can have improper alignment in the function entry code. */
+ if (base
+ && (REGNO (base) == FRAME_POINTER_REGNUM
+ || REGNO (base) == STACK_POINTER_REGNUM))
+ {
+ if (((INTVAL (offset) - SPARC_STACK_BIAS) & 0x7) == 0)
+ return 1;
+ }
+ /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
+ is true, in which case we can only assume that an access is aligned if
+ it is to a constant address, or the address involves a LO_SUM.
+
+ We used to assume an address was aligned if MEM_IN_STRUCT_P was true.
+ That assumption was deleted so that gcc generated code can be used with
+ memory allocators that only guarantee 4 byte alignment. */
+ else if (! TARGET_UNALIGNED_DOUBLES || CONSTANT_P (addr)
+ || GET_CODE (addr) == LO_SUM)
+ return 1;
+
+ /* An obviously unaligned address. */
+ return 0;
+}
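+
+/* For example, with a zero stack bias (mem:DI (plus (reg %fp)
+ (const_int 8))) passes the offset check, while an offset of 4 does
+ not; a memory reference based on any other register is assumed to be
+ 8 byte aligned unless -munaligned-doubles is in effect. */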
+
+enum optype { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP };
+
+/* Output assembler code to perform a doubleword move insn
+ with operands OPERANDS. This is very similar to the following
+ output_move_quad function. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+ register enum optype optype0;
+ register enum optype optype1;
+ rtx latehalf[2];
+ rtx addreg0 = 0;
+ rtx addreg1 = 0;
+ int highest_first = 0;
+ int no_addreg1_decrement = 0;
+
+ /* First classify both operands. */
+
+ if (REG_P (op0))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (op0))
+ optype0 = OFFSOP;
+ else if (GET_CODE (op0) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (op1))
+ optype1 = REGOP;
+ else if (CONSTANT_P (op1))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (op1))
+ optype1 = OFFSOP;
+ else if (GET_CODE (op1) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow. Abort if we get one, because generating
+ code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP
+ || (optype0 == MEMOP && optype1 == MEMOP))
+ abort ();
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the second word. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (op0, 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (op1, 0));
+
+ /* Ok, we can do one word at a time.
+ Set up in LATEHALF the operands to use for the
+ high-numbered (least significant) word and in some cases alter the
+ operands in OPERANDS to be suitable for the low-numbered word. */
+
+ if (optype0 == REGOP)
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (op0) + 1);
+ else if (optype0 == OFFSOP)
+ latehalf[0] = adj_offsettable_operand (op0, 4);
+ else
+ latehalf[0] = op0;
+
+ if (optype1 == REGOP)
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (op1) + 1);
+ else if (optype1 == OFFSOP)
+ latehalf[1] = adj_offsettable_operand (op1, 4);
+ else if (optype1 == CNSTOP)
+ {
+ if (TARGET_ARCH64)
+ {
+ if (arith_double_operand (op1, DImode))
+ {
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
+ return "mov %1,%0";
+ }
+ else
+ {
+ /* The only way to handle CONST_DOUBLEs or other 64 bit
+ constants here is to use a temporary, such as is done
+ for the V9 DImode sethi insn pattern. This is not
+ a practical solution, so abort if we reach here.
+ The md file should always force such constants to
+ memory. */
+ abort ();
+ }
+ }
+ else
+ split_double (op1, &operands[1], &latehalf[1]);
+ }
+ else
+ latehalf[1] = op1;
+
+ /* Easy case: try moving both words at once. Check for moving between
+ an even/odd register pair and a memory location. */
+ if ((optype0 == REGOP && optype1 != REGOP && optype1 != CNSTOP
+ && (TARGET_ARCH64 || (REGNO (op0) & 1) == 0))
+ || (optype0 != REGOP && optype0 != CNSTOP && optype1 == REGOP
+ && (TARGET_ARCH64 || (REGNO (op1) & 1) == 0)))
+ {
+ register rtx mem,reg;
+
+ if (optype0 == REGOP)
+ mem = op1, reg = op0;
+ else
+ mem = op0, reg = op1;
+
+ /* In v9, ldd can be used for word aligned addresses, so technically
+ some of this logic is unneeded. We still avoid ldd if the address
+ is obviously unaligned though.
+
+ Integer ldd/std are deprecated in V9 and are slow on UltraSPARC.
+ Use them only if the access is volatile or not offsettable. */
+
+ if ((mem_aligned_8 (mem)
+ && (REGNO (reg) >= 32
+ || MEM_VOLATILE_P (mem)
+ || ! ((optype0 == OFFSOP || optype1 == OFFSOP)
+ && (sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_V9))))
+ /* If this is a floating point register higher than %f31,
+ then we *must* use an aligned load, since `ld' will not accept
+ the register number. */
+ || (TARGET_V9 && REGNO (reg) >= 64)
+ /* Even if two instructions would otherwise be better than ldd/std,
+ if this insn was put in a delay slot because reorg thought it
+ was only one machine instruction, make sure it is only one
+ instruction. */
+ || dbr_sequence_length () != 0)
+ {
+ if (FP_REG_P (reg) || ! TARGET_ARCH64)
+ return (mem == op1 ? "ldd %1,%0" : "std %1,%0");
+ else
+ return (mem == op1 ? "ldx %1,%0" : "stx %1,%0");
+ }
+ }
+
+ if (TARGET_ARCH64)
+ {
+ if (optype0 == REGOP && optype1 == REGOP)
+ {
+ if (FP_REG_P (op0))
+ return "fmovd %1,%0";
+ else
+ return "mov %1,%0";
+ }
+ }
+
+ /* If the first move would clobber the source of the second one,
+ do them in the other order. */
+
+ /* Overlapping registers. */
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (op0) == REGNO (latehalf[1]))
+ {
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+ /* Do low-numbered word. */
+ return singlemove_string (operands);
+ }
+ /* Loading into a register which overlaps a register used in the address. */
+ else if (optype0 == REGOP && optype1 != REGOP
+ && reg_overlap_mentioned_p (op0, op1))
+ {
+ /* If both halves of dest are used in the src memory address,
+ add the two regs and put them in the low reg (op0).
+ Then it works to load latehalf first. */
+ if (reg_mentioned_p (op0, XEXP (op1, 0))
+ && reg_mentioned_p (latehalf[0], XEXP (op1, 0)))
+ {
+ rtx xops[2];
+ xops[0] = latehalf[0];
+ xops[1] = op0;
+ output_asm_insn ("add %1,%0,%1", xops);
+ operands[1] = gen_rtx_MEM (DImode, op0);
+ latehalf[1] = adj_offsettable_operand (operands[1], 4);
+ addreg1 = 0;
+ highest_first = 1;
+ }
+ /* Only one register in the dest is used in the src memory address,
+ and this is the first register of the dest, so we want to do
+ the late half first here also. */
+ else if (! reg_mentioned_p (latehalf[0], XEXP (op1, 0)))
+ highest_first = 1;
+ /* Only one register in the dest is used in the src memory address,
+ and this is the second register of the dest, so we want to do
+ the late half last. If addreg1 is set, and addreg1 is the same
+ register as latehalf, then we must suppress the trailing decrement,
+ because it would clobber the value just loaded. */
+ else if (addreg1 && reg_mentioned_p (addreg1, latehalf[0]))
+ no_addreg1_decrement = 1;
+ }
+
+ /* Normal case: do the two words, low-numbered first.
+ Overlap case (highest_first set): do high-numbered word first. */
+
+ if (! highest_first)
+ output_asm_insn (singlemove_string (operands), operands);
+
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x4,%0", &addreg0);
+ if (addreg1 && ! no_addreg1_decrement)
+ output_asm_insn ("add %0,-0x4,%0", &addreg1);
+
+ if (highest_first)
+ output_asm_insn (singlemove_string (operands), operands);
+
+ return "";
+}
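+
+/* For instance, moving a DImode value from an 8 byte aligned stack
+ slot into the even/odd pair %o0/%o1 takes the "easy case" above and
+ is emitted as a single "ldd"; on UltraSPARC, where integer ldd is
+ slow, or when the slot is not known to be 8 byte aligned, the move
+ is instead split into two "ld" instructions, the second one using
+ the latehalf operands. */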
+
+/* Output assembler code to perform a quadword move insn
+ with operands OPERANDS. This is very similar to the preceding
+ output_move_double function. */
+
+char *
+output_move_quad (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+ register enum optype optype0;
+ register enum optype optype1;
+ rtx wordpart[4][2];
+ rtx load_late[4];
+ int load_late_half[2];
+ rtx addreg0 = 0;
+ rtx addreg1 = 0;
+
+ load_late_half[0] = 0; load_late_half[1] = 0;
+ load_late[0] = 0; load_late[1] = 0; load_late[2] = 0;
+ load_late[3] = 0;
+
+ wordpart[0][0] = NULL; wordpart[1][0] = NULL; wordpart[2][0] = NULL;
+ wordpart[3][0] = NULL;
+
+ /* First classify both operands. */
+
+ if (REG_P (op0))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (op0))
+ optype0 = OFFSOP;
+ else if (GET_CODE (op0) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (op1))
+ optype1 = REGOP;
+ else if (CONSTANT_P (op1))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (op1))
+ optype1 = OFFSOP;
+ else if (GET_CODE (op1) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow. Abort if we get one, because generating
+ code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP
+ || (optype0 == MEMOP && optype1 == MEMOP))
+ abort ();
+
+ if (optype0 == REGOP)
+ {
+ wordpart[0][0] = gen_rtx_REG (word_mode, REGNO (op0) + 0);
+ if (TARGET_ARCH64 && FP_REG_P (op0)
+ && REGNO (op0) < SPARC_FIRST_V9_FP_REG)
+ wordpart[1][0] = gen_rtx_REG (word_mode, REGNO (op0) + 2);
+ else
+ wordpart[1][0] = gen_rtx_REG (word_mode, REGNO (op0) + 1);
+
+ if (TARGET_ARCH32)
+ {
+ wordpart[2][0] = gen_rtx_REG (word_mode, REGNO (op0) + 2);
+ wordpart[3][0] = gen_rtx_REG (word_mode, REGNO (op0) + 3);
+ }
+
+ /* Loading into a register which overlaps a register used in the
+ address. */
+ if (optype1 != REGOP && reg_overlap_mentioned_p (op0, op1))
+ {
+ int i;
+ int count;
+
+ count = 0;
+
+ for (i = 0; i < 4 && wordpart[i][0] != NULL; i++)
+ {
+ if (reg_mentioned_p (wordpart[i][0], op1))
+ {
+ load_late[i] = wordpart[i][0];
+ load_late_half[TARGET_ARCH64 ? i : i/2] = 1;
+ count++;
+ }
+ }
+ if (count > 2)
+ {
+ /* Not sure what to do here. Multiple adds? Can't happen. */
+ abort ();
+ }
+ else if (count == 2)
+ {
+ /* We have a two-address source operand, and both registers
+ overlap with the dest quad. Add them together and
+ store the result into the last register of the quad being
+ loaded, then generate an appropriate MEM insn. */
+ rtx temp[3];
+ int place = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ if (load_late[i])
+ {
+ temp[place++] = load_late[i];
+ load_late[i] = 0;
+ }
+ }
+ temp[2] = wordpart[3][0];
+ output_asm_insn ("add %0, %1, %2", temp);
+ load_late_half[0] = 0;
+ load_late_half[1] = 1;
+ op1 = gen_rtx_MEM (TFmode, wordpart[3][0]);
+ operands[1] = op1;
+ optype1 = OFFSOP;
+ }
+ }
+ }
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the later words. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (op0, 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (op1, 0));
+
+ /* Ok, we can do one word at a time.
+ Set up in wordpart the operands to use for each word of the arguments. */
+
+ if (optype0 == OFFSOP)
+ {
+ wordpart[0][0] = adj_offsettable_operand (op0, 0);
+ if (TARGET_ARCH32)
+ {
+ wordpart[1][0] = adj_offsettable_operand (op0, 4);
+ wordpart[2][0] = adj_offsettable_operand (op0, 8);
+ wordpart[3][0] = adj_offsettable_operand (op0, 12);
+ }
+ else
+ wordpart[1][0] = adj_offsettable_operand (op0, 8);
+ }
+ else if (optype0 != REGOP)
+ {
+ wordpart[0][0] = op0;
+ wordpart[1][0] = op0;
+ wordpart[2][0] = op0;
+ wordpart[3][0] = op0;
+ }
+
+ if (optype1 == REGOP)
+ {
+ wordpart[0][1] = gen_rtx_REG (word_mode, REGNO (op1) + 0);
+ if (TARGET_ARCH64 && FP_REG_P (op1)
+ && REGNO (op1) < SPARC_FIRST_V9_FP_REG)
+ wordpart[1][1] = gen_rtx_REG (word_mode, REGNO (op1) + 2);
+ else
+ wordpart[1][1] = gen_rtx_REG (word_mode, REGNO (op1) + 1);
+
+ if (TARGET_ARCH32)
+ {
+ wordpart[2][1] = gen_rtx_REG (word_mode, REGNO (op1) + 2);
+ wordpart[3][1] = gen_rtx_REG (word_mode, REGNO (op1) + 3);
+ }
+ }
+ else if (optype1 == OFFSOP)
+ {
+ wordpart[0][1] = adj_offsettable_operand (op1, 0);
+ if (TARGET_ARCH32)
+ {
+ wordpart[1][1] = adj_offsettable_operand (op1, 4);
+ wordpart[2][1] = adj_offsettable_operand (op1, 8);
+ wordpart[3][1] = adj_offsettable_operand (op1, 12);
+ }
+ else
+ wordpart[1][1] = adj_offsettable_operand (op1, 8);
+ }
+ else if (optype1 == CNSTOP)
+ {
+ REAL_VALUE_TYPE r;
+ long l[4];
+
+ /* This only works for TFmode floating point constants. */
+ if (GET_CODE (op1) != CONST_DOUBLE || GET_MODE (op1) != TFmode)
+ abort ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op1);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+
+ wordpart[0][1] = GEN_INT (l[0]);
+ wordpart[1][1] = GEN_INT (l[1]);
+ wordpart[2][1] = GEN_INT (l[2]);
+ wordpart[3][1] = GEN_INT (l[3]);
+ }
+ else
+ {
+ wordpart[0][1] = op1;
+ wordpart[1][1] = op1;
+ wordpart[2][1] = op1;
+ wordpart[3][1] = op1;
+ }
+
+ /* Easy case: try moving the quad as two pairs. Check for moving between
+ an even/odd register pair and a memory location.
+ Also handle new v9 fp regs here. */
+ /* ??? Should also handle the case of non-offsettable addresses here.
+ We can at least do the first pair as a ldd/std, and then do the third
+ and fourth words individually. */
+ if ((optype0 == REGOP && optype1 == OFFSOP && (REGNO (op0) & 1) == 0)
+ || (optype0 == OFFSOP && optype1 == REGOP && (REGNO (op1) & 1) == 0))
+ {
+ rtx mem, reg;
+ int use_ldx;
+
+ if (optype0 == REGOP)
+ mem = op1, reg = op0;
+ else
+ mem = op0, reg = op1;
+
+ if (mem_aligned_8 (mem)
+ /* If this is a floating point register higher than %f31,
+ then we *must* use an aligned load, since `ld' will not accept
+ the register number. */
+ || (TARGET_V9 && REGNO (reg) >= SPARC_FIRST_V9_FP_REG))
+ {
+ static char * const mov_by_64[2][2][2] = {
+ { { "std %S1,%2;std %1,%0", "stx %R1,%2;stx %1,%0" },
+ { "ldd %2,%S0;ldd %1,%0", "ldx %2,%R0;ldx %1,%0" } },
+ { { "std %1,%0;std %S1,%2", "stx %1,%0;stx %R1,%2" },
+ { "ldd %1,%0;ldd %2,%S0", "ldx %1,%0;ldx %2,%R0" } }
+ };
+
+ if (TARGET_V9 && FP_REG_P (reg) && TARGET_HARD_QUAD)
+ {
+ /* Only abort if the register # requires that we use ldq. */
+ if ((REGNO (reg) & 3) == 0)
+ {
+ /* ??? Can `mem' have an inappropriate alignment here? */
+ return (mem == op1 ? "ldq %1,%0" : "stq %1,%0");
+ }
+ else
+ {
+ if (REGNO (reg) >= SPARC_FIRST_V9_FP_REG)
+ abort();
+ }
+ }
+ operands[2] = adj_offsettable_operand (mem, 8);
+
+ /* Do the loads in the right order; can't overwrite our address
+ register. */
+ use_ldx = TARGET_ARCH64 && !FP_REG_P (reg);
+ return mov_by_64[!load_late_half[0]][mem == op1][use_ldx];
+ }
+ }
+
+ /* If the first move would clobber the source of the second one,
+ do them in the other order. */
+
+ /* Overlapping registers? */
+ if (TARGET_ARCH32)
+ {
+ if (optype0 == REGOP && optype1 == REGOP
+ && (REGNO (op0) == REGNO (wordpart[3][1])
+ || REGNO (op0) == REGNO (wordpart[2][1])
+ || REGNO (op0) == REGNO (wordpart[1][1])))
+ {
+ /* Do fourth word. */
+ output_asm_insn (singlemove_string (wordpart[3]), wordpart[3]);
+ /* Do the third word. */
+ output_asm_insn (singlemove_string (wordpart[2]), wordpart[2]);
+ /* Do the second word. */
+ output_asm_insn (singlemove_string (wordpart[1]), wordpart[1]);
+ /* Do lowest-numbered word. */
+ output_asm_insn (singlemove_string (wordpart[0]), wordpart[0]);
+ return "";
+ }
+ }
+ else /* TARGET_ARCH64 */
+ {
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (op0) == REGNO (wordpart[1][1]))
+ {
+ output_asm_insn ("mov %1,%0", wordpart[1]);
+ output_asm_insn ("mov %1,%0", wordpart[0]);
+ return "";
+ }
+ }
+
+ /* Normal case: move the words in lowest to highest address order.
+ There may be an overlapping register; in that case, skip it and go
+ back to it afterwards. */
+
+ if (TARGET_ARCH32)
+ {
+ int i;
+ int offset = 0xc;
+ rtx temp[2];
+
+ for (i = 0; i < 4; i++)
+ {
+ if (! load_late[i])
+ output_asm_insn (singlemove_string (wordpart[i]), wordpart[i]);
+
+ if (i != 3)
+ {
+ /* Make any unoffsettable addresses point at the next word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+ }
+ }
+ for (i = 0; i < 4; i++)
+ {
+ if (load_late[i])
+ {
+ int fix = offset - i * 4;
+
+ /* Back up to the appropriate place. */
+ temp[1] = GEN_INT (-fix);
+ if (addreg0)
+ {
+ temp[0] = addreg0;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ if (addreg1)
+ {
+ temp[0] = addreg1;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ output_asm_insn (singlemove_string (wordpart[i]),
+ wordpart[i]);
+ /* Don't modify the register that's the destination of the
+ move. */
+ temp[0] = GEN_INT (-(offset - fix));
+ if (addreg0 && REGNO (addreg0) != REGNO (wordpart[i][0]))
+ {
+ temp[1] = addreg0;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ if (addreg1 && REGNO (addreg1) != REGNO (wordpart[i][0]))
+ {
+ temp[1] = addreg1;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ offset = 0;
+ break;
+ }
+ }
+ if (offset)
+ {
+ temp[1] = GEN_INT (-offset);
+ /* Undo the adds we just did. */
+ if (addreg0)
+ {
+ temp[0] = addreg0;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ if (addreg1)
+ {
+ temp[0] = addreg1;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ }
+ }
+ else /* TARGET_ARCH64 */
+ {
+ if (load_late_half[0])
+ {
+ /* Load the second half first. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x8,%0", &addreg1);
+
+ output_asm_insn (doublemove_string (wordpart[1]), wordpart[1]);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,-0x8,%0", &addreg1);
+
+ output_asm_insn (doublemove_string (wordpart[0]), wordpart[0]);
+ }
+ else
+ {
+ output_asm_insn (doublemove_string (wordpart[0]), wordpart[0]);
+
+ if (addreg0)
+ output_asm_insn ("add %0,0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x8,%0", &addreg1);
+
+ /* Do the second word. */
+ output_asm_insn (doublemove_string (wordpart[1]), wordpart[1]);
+
+ /* Undo the adds we just did. But don't modify the dest of
+ the move. */
+ if (addreg0 && REGNO (addreg0) != REGNO (wordpart[1][0]))
+ output_asm_insn ("add %0,-0x8,%0", &addreg0);
+ if (addreg1 && REGNO (addreg1) != REGNO (wordpart[1][0]))
+ output_asm_insn ("add %0,-0x8,%0", &addreg1);
+ }
+ }
+
+ return "";
+}
+
+/* Output assembler code to perform a doubleword move insn with operands
+ OPERANDS, one of which must be a floating point register. */
+
+char *
+output_fp_move_double (operands)
+ rtx *operands;
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ {
+ if (TARGET_V9)
+ return "fmovd %1,%0";
+ else
+ return "fmovs %1,%0\n\tfmovs %R1,%R0";
+ }
+ else if (GET_CODE (operands[1]) == REG)
+ abort ();
+ else
+ return output_move_double (operands);
+ }
+ else if (FP_REG_P (operands[1]))
+ {
+ if (GET_CODE (operands[0]) == REG)
+ abort ();
+ else
+ return output_move_double (operands);
+ }
+ else abort ();
+}
+
+/* When doing a quad-register move, determine the direction in which
+ the move needs to be performed. SRC and DST are the source and
+ destination registers.
+
+ A value of -1 indicates that the move needs to be done from the
+ highest register to the lowest; a value of 1 that it can proceed from
+ the lowest to the highest. */
+
+static int
+move_quad_direction (src, dst)
+ rtx src, dst;
+{
+ if ((REGNO (dst) > REGNO (src))
+ && (REGNO (dst) < (REGNO (src) + 4)))
+ return -1;
+ else
+ return 1;
+}
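+
+/* For example, copying %f0-%f3 into %f2-%f5 overlaps, so the move must
+ proceed from the highest register pair downward (direction -1),
+ whereas copying into %f4-%f7 does not overlap and can go lowest
+ first. */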
+
+/* Output assembler code to perform a quadword move insn with operands
+ OPERANDS, one of which must be a floating point register. */
+
+char *
+output_fp_move_quad (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+
+ if (FP_REG_P (op0))
+ {
+ if (FP_REG_P (op1))
+ {
+ if (TARGET_V9 && TARGET_HARD_QUAD)
+ return "fmovq %1,%0";
+ else if (TARGET_V9)
+ {
+ int dir = move_quad_direction (op1, op0);
+ if (dir > 0)
+ return "fmovd %1,%0\n\tfmovd %S1,%S0";
+ else
+ return "fmovd %S1,%S0\n\tfmovd %1,%0";
+ }
+ else
+ {
+ int dir = move_quad_direction (op0, op1);
+ if (dir > 0)
+ return "fmovs %1,%0\n\tfmovs %R1,%R0\n\tfmovs %S1,%S0\n\tfmovs %T1,%T0";
+ else
+ return "fmovs %T1,%T0\n\tfmovs %S1,%S0\n\tfmovs %R1,%R0\n\tfmovs %1,%0";
+ }
+ }
+ else if (GET_CODE (op1) == REG)
+ abort ();
+ else
+ return output_move_quad (operands);
+ }
+ else if (FP_REG_P (op1))
+ {
+ if (GET_CODE (op0) == REG)
+ abort ();
+ else
+ return output_move_quad (operands);
+ }
+ else
+ abort ();
+}
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG. */
+
+static rtx
+find_addr_reg (addr)
+ rtx addr;
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ /* We absolutely can not fudge the frame pointer here, because the
+ frame pointer must always be 8 byte aligned. It also confuses
+ debuggers. */
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) != FRAME_POINTER_REGNUM)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG
+ && REGNO (XEXP (addr, 1)) != FRAME_POINTER_REGNUM)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ abort ();
+ }
+ if (GET_CODE (addr) == REG)
+ return addr;
+ abort ();
+}
+
+/* Output reasonable peephole for set-on-condition-code insns.
+ Note that these insns assume a particular way of defining
+ labels. Therefore, *both* sparc.h and this function must
+ be changed if a new syntax is needed. */
+
+char *
+output_scc_insn (operands, insn)
+ rtx operands[];
+ rtx insn;
+{
+ static char string[100];
+ rtx label = 0, next = insn;
+ int need_label = 0;
+
+ /* This code used to be called with final_sequence nonzero (for fpcc
+ delay slots), but that is no longer allowed. */
+ if (final_sequence)
+ abort ();
+
+ /* On UltraSPARC a conditional move blocks until 3 cycles after prior loads
+ complete. It might be beneficial here to use branches if any recent
+ instructions were loads. */
+ if (TARGET_V9 && REGNO (operands[1]) == SPARC_ICC_REG)
+ return "mov 0,%0\n\tmov%C2 %x1,1,%0";
+
+ /* Try doing a jump optimization which jump.c can't do for us
+ because we did not expose that setcc works by using branches.
+
+ If this scc insn is followed by an unconditional branch, then have
+ the jump insn emitted here jump to that location, instead of to
+ the end of the scc sequence as usual. */
+
+ do
+ {
+ if (GET_CODE (next) == CODE_LABEL)
+ label = next;
+ next = NEXT_INSN (next);
+ }
+ while (next && (GET_CODE (next) == NOTE || GET_CODE (next) == CODE_LABEL));
+
+ if (next && GET_CODE (next) == JUMP_INSN && simplejump_p (next))
+ label = JUMP_LABEL (next);
+
+ /* If not optimizing, jump label fields are not set. To be safe, always
+ check here whether label is still zero. */
+ if (label == 0)
+ {
+ label = gen_label_rtx ();
+ need_label = 1;
+ }
+
+ LABEL_NUSES (label) += 1;
+
+ /* operands[3] is an unused slot. */
+ operands[3] = label;
+
+ strcpy (string, output_cbranch (operands[2], 3, 0, 1, 0, 0));
+ strcat (string, "\n\tmov 1,%0\n\tmov 0,%0");
+
+ if (need_label)
+ strcat (string, "\n%l3:");
+
+ return string;
+}
+
+/* Vectors to keep interesting information about registers where it can easily
+ be got. We used to use the actual mode value as the bit number, but there
+ are more than 32 modes now. Instead we use two tables: one indexed by
+ hard register number, and one indexed by mode. */
+
+/* The purpose of sparc_mode_class is to shrink the range of modes so that
+ they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
+ mapped into one sparc_mode_class mode. */
+
+enum sparc_mode_class {
+ S_MODE, D_MODE, T_MODE, O_MODE,
+ SF_MODE, DF_MODE, TF_MODE, OF_MODE,
+ CC_MODE, CCFP_MODE
+};
+
+/* Modes for single-word and smaller quantities. */
+#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
+
+/* Modes for double-word and smaller quantities. */
+#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
+
+/* Modes for quad-word and smaller quantities. */
+#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
+
+/* Modes for single-float quantities. We must allow any single word or
+ smaller quantity. This is because the fix/float conversion instructions
+ take integer inputs/outputs from the float registers. */
+#define SF_MODES (S_MODES)
+
+/* Modes for double-float and smaller quantities. */
+#define DF_MODES (S_MODES | D_MODES)
+
+#define DF_MODES64 DF_MODES
+
+/* Modes for double-float only quantities. */
+#define DF_ONLY_MODES ((1 << (int) DF_MODE) | (1 << (int) D_MODE))
+
+/* Modes for double-float and larger quantities. */
+#define DF_UP_MODES (DF_ONLY_MODES | TF_ONLY_MODES)
+
+/* Modes for quad-float only quantities. */
+#define TF_ONLY_MODES (1 << (int) TF_MODE)
+
+/* Modes for quad-float and smaller quantities. */
+#define TF_MODES (DF_MODES | TF_ONLY_MODES)
+
+#define TF_MODES64 (DF_MODES64 | TF_ONLY_MODES)
+
+/* Modes for condition codes. */
+#define CC_MODES (1 << (int) CC_MODE)
+#define CCFP_MODES (1 << (int) CCFP_MODE)
+
+/* Value is 1 if register/mode pair is acceptable on sparc.
+ The funny mixture of D and T modes is because integer operations
+ do not specially operate on tetra quantities, so non-quad-aligned
+ registers can hold quadword quantities (except %o4 and %i4 because
+ they cross fixed registers). */
+
+/* This points to either the 32 bit or the 64 bit version. */
+int *hard_regno_mode_classes;
+
+static int hard_32bit_mode_classes[] = {
+ S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+
+ /* FP regs f32 to f63. Only the even numbered registers actually exist,
+ and none can hold SFmode/SImode values. */
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+
+ /* %fcc[0123] */
+ CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
+
+ /* %icc */
+ CC_MODES
+};
+
+static int hard_64bit_mode_classes[] = {
+ D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+
+ /* FP regs f32 to f63. Only the even numbered registers actually exist,
+ and none can hold SFmode/SImode values. */
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+
+ /* %fcc[0123] */
+ CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
+
+ /* %icc */
+ CC_MODES
+};
+
+int sparc_mode_class [NUM_MACHINE_MODES];
+
+enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
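+
+/* Roughly how the two tables are meant to combine (the actual test is the
+   HARD_REGNO_MODE_OK macro in sparc.h):
+
+	(hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0
+
+   i.e. a mode fits in a hard register iff the register's class mask has the
+   bit for that mode's sparc_mode_class set.  */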
+
+static void
+sparc_init_modes ()
+{
+ int i;
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ if (GET_MODE_SIZE (i) <= 4)
+ sparc_mode_class[i] = 1 << (int) S_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ sparc_mode_class[i] = 1 << (int) D_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ sparc_mode_class[i] = 1 << (int) T_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ sparc_mode_class[i] = 1 << (int) O_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:
+ if (GET_MODE_SIZE (i) <= 4)
+ sparc_mode_class[i] = 1 << (int) SF_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ sparc_mode_class[i] = 1 << (int) DF_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ sparc_mode_class[i] = 1 << (int) TF_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ sparc_mode_class[i] = 1 << (int) OF_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ case MODE_CC:
+ default:
+ /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
+ we must explicitly check for them here. */
+ if (i == (int) CCFPmode || i == (int) CCFPEmode)
+ sparc_mode_class[i] = 1 << (int) CCFP_MODE;
+ else if (i == (int) CCmode || i == (int) CC_NOOVmode
+ || i == (int) CCXmode || i == (int) CCX_NOOVmode)
+ sparc_mode_class[i] = 1 << (int) CC_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ }
+ }
+
+ if (TARGET_ARCH64)
+ hard_regno_mode_classes = hard_64bit_mode_classes;
+ else
+ hard_regno_mode_classes = hard_32bit_mode_classes;
+
+ /* Initialize the array used by REGNO_REG_CLASS. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (i < 16 && TARGET_V8PLUS)
+ sparc_regno_reg_class[i] = I64_REGS;
+ else if (i < 32)
+ sparc_regno_reg_class[i] = GENERAL_REGS;
+ else if (i < 64)
+ sparc_regno_reg_class[i] = FP_REGS;
+ else if (i < 96)
+ sparc_regno_reg_class[i] = EXTRA_FP_REGS;
+ else if (i < 100)
+ sparc_regno_reg_class[i] = FPCC_REGS;
+ else
+ sparc_regno_reg_class[i] = NO_REGS;
+ }
+}
+
+/* Save non-call-used registers from LOW to HIGH at BASE+OFFSET.
+ N_REGS is the number of 4-byte regs saved thus far. This applies even to
+ v9 int regs, as it simplifies the code. */
+
+static int
+save_regs (file, low, high, base, offset, n_regs, real_offset)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_regs;
+ int real_offset;
+{
+ int i;
+
+ if (TARGET_ARCH64 && high <= 32)
+ {
+ for (i = low; i < high; i++)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ {
+ fprintf (file, "\tstx %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
+ n_regs += 2;
+ }
+ }
+ }
+ else
+ {
+ for (i = low; i < high; i += 2)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ {
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ {
+ fprintf (file, "\tstd %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ dwarf2out_reg_save (l, i, real_offset + 4 * n_regs);
+ dwarf2out_reg_save (l, i+1, real_offset + 4 * n_regs + 4);
+ }
+ n_regs += 2;
+ }
+ else
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
+ n_regs += 2;
+ }
+ }
+ else
+ {
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i+1], base, offset + 4 * n_regs + 4);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i + 1, real_offset + 4 * n_regs + 4);
+ n_regs += 2;
+ }
+ }
+ }
+ }
+ return n_regs;
+}
+
+/* Restore non-call-used registers from LOW to HIGH at BASE+OFFSET.
+
+ N_REGS is the number of 4-byte regs saved thus far. This applies even to
+ v9 int regs as it simplifies the code. */
+
+static int
+restore_regs (file, low, high, base, offset, n_regs)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_regs;
+{
+ int i;
+
+ if (TARGET_ARCH64 && high <= 32)
+ {
+ for (i = low; i < high; i++)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ fprintf (file, "\tldx [%s+%d], %s\n",
+ base, offset + 4 * n_regs, reg_names[i]),
+ n_regs += 2;
+ }
+ }
+ else
+ {
+ for (i = low; i < high; i += 2)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ fprintf (file, "\tldd [%s+%d], %s\n",
+ base, offset + 4 * n_regs, reg_names[i]),
+ n_regs += 2;
+ else
+ fprintf (file, "\tld [%s+%d],%s\n",
+ base, offset + 4 * n_regs, reg_names[i]),
+ n_regs += 2;
+ else if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ fprintf (file, "\tld [%s+%d],%s\n",
+ base, offset + 4 * n_regs + 4, reg_names[i+1]),
+ n_regs += 2;
+ }
+ }
+ return n_regs;
+}
+
+/* Static variables we want to share between prologue and epilogue. */
+
+/* Number of live general or floating point registers that need to be saved
+ (counted as 4-byte quantities). This is only done if TARGET_EPILOGUE. */
+static int num_gfregs;
+
+/* Compute the frame size required by the function. This function is called
+ during the reload pass and also by output_function_prologue(). */
+
+int
+compute_frame_size (size, leaf_function)
+ int size;
+ int leaf_function;
+{
+ int n_regs = 0, i;
+ int outgoing_args_size = (current_function_outgoing_args_size
+ + REG_PARM_STACK_SPACE (current_function_decl));
+
+ if (TARGET_EPILOGUE)
+ {
+ /* N_REGS is the number of 4-byte regs saved thus far. This applies
+ even to v9 int regs to be consistent with save_regs/restore_regs. */
+
+ if (TARGET_ARCH64)
+ {
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ n_regs += 2;
+ }
+ else
+ {
+ for (i = 0; i < 8; i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_regs += 2;
+ }
+
+ for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_regs += 2;
+ }
+
+ /* Set up values for use in `function_epilogue'. */
+ num_gfregs = n_regs;
+
+ if (leaf_function && n_regs == 0
+ && size == 0 && current_function_outgoing_args_size == 0)
+ {
+ actual_fsize = apparent_fsize = 0;
+ }
+ else
+ {
+ /* We subtract STARTING_FRAME_OFFSET, remember it's negative.
+ The stack bias (if any) is taken out to undo its effects. */
+ apparent_fsize = (size - STARTING_FRAME_OFFSET + SPARC_STACK_BIAS + 7) & -8;
+ apparent_fsize += n_regs * 4;
+ actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
+ }
+
+ /* Make sure nothing can clobber our register windows.
+ If a SAVE must be done, or there is a stack-local variable,
+ the register window area must be allocated.
+ ??? For v8 we apparently need an additional 8 bytes of reserved space. */
+ if (leaf_function == 0 || size > 0)
+ actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
+
+ return SPARC_STACK_ALIGN (actual_fsize);
+}
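+
+/* A small arithmetic note on the rounding above: "(x + 7) & -8" rounds x up
+   to the next multiple of 8 (e.g. 20 -> 24, 24 -> 24), so both apparent_fsize
+   and the outgoing argument area stay 8-byte aligned.  */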
+
+/* Build a (32 bit) big number in a register. */
+/* ??? We may be able to use the set macro here too. */
+
+static void
+build_big_number (file, num, reg)
+ FILE *file;
+ int num;
+ char *reg;
+{
+ if (num >= 0 || ! TARGET_ARCH64)
+ {
+ fprintf (file, "\tsethi %%hi(%d),%s\n", num, reg);
+ if ((num & 0x3ff) != 0)
+ fprintf (file, "\tor %s,%%lo(%d),%s\n", reg, num, reg);
+ }
+ else /* num < 0 && TARGET_ARCH64 */
+ {
+ /* Sethi does not sign extend, so we must use a little trickery
+ to use it for negative numbers. Invert the constant before
+ loading it in, then use xor immediate to invert the loaded bits
+ (along with the upper 32 bits) to the desired constant. This
+ works because the sethi and immediate fields overlap. */
+ int asize = num;
+ int inv = ~asize;
+ int low = -0x400 + (asize & 0x3FF);
+
+ fprintf (file, "\tsethi %%hi(%d),%s\n\txor %s,%d,%s\n",
+ inv, reg, reg, low, reg);
+ }
+}
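+
+/* Worked example for the negative TARGET_ARCH64 case above: with
+   num = -0x12345 we get inv = ~num = 0x12344 and
+   low = -0x400 + (num & 0x3ff) = -0x400 + 0xbb = -837.
+   "sethi %hi(0x12344),%g1" leaves %g1 = 0x12000 (zero extended), and
+   "xor %g1,-837,%g1" xors in 0xfffffffffffffcbb, giving
+   %g1 = 0xfffffffffffedcbb, i.e. -0x12345 sign extended to 64 bits.  */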
+
+/* Output code for the function prologue. */
+
+void
+output_function_prologue (file, size, leaf_function)
+ FILE *file;
+ int size;
+ int leaf_function;
+{
+ /* Need to use actual_fsize, since we are also allocating
+ space for our callee (and our own register save area). */
+ actual_fsize = compute_frame_size (size, leaf_function);
+
+ if (leaf_function)
+ {
+ frame_base_name = "%sp";
+ frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
+ }
+ else
+ {
+ frame_base_name = "%fp";
+ frame_base_offset = SPARC_STACK_BIAS;
+ }
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
+
+ if (actual_fsize == 0)
+ /* do nothing. */ ;
+ else if (! leaf_function && ! TARGET_BROKEN_SAVERESTORE)
+ {
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tsave %%sp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tsave %%sp,-4096,%%sp\n");
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tsave %%sp,%%g1,%%sp\n");
+ }
+ }
+ else if (! leaf_function && TARGET_BROKEN_SAVERESTORE)
+ {
+ /* We assume the environment will properly handle, or otherwise avoid,
+ trouble associated with an interrupt occurring after the `save' or a
+ trap occurring during it. */
+ fprintf (file, "\tsave\n");
+
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tadd %%fp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tadd %%fp,-4096,%%sp\n");
+ fprintf (file, "\tadd %%fp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tadd %%fp,%%g1,%%sp\n");
+ }
+ }
+ else /* leaf function */
+ {
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tadd %%sp,-4096,%%sp\n");
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tadd %%sp,%%g1,%%sp\n");
+ }
+ }
+
+ if (dwarf2out_do_frame () && actual_fsize)
+ {
+ char *label = dwarf2out_cfi_label ();
+
+ /* The canonical frame address refers to the top of the frame. */
+ dwarf2out_def_cfa (label, (leaf_function ? STACK_POINTER_REGNUM
+ : FRAME_POINTER_REGNUM),
+ frame_base_offset);
+
+ if (! leaf_function)
+ {
+ /* Note the register window save. This tells the unwinder that
+ it needs to restore the window registers from the previous
+ frame's window save area at 0(cfa). */
+ dwarf2out_window_save (label);
+
+ /* The return address (-8) is now in %i7. */
+ dwarf2out_return_reg (label, 31);
+ }
+ }
+
+ /* If doing anything with PIC, do it now. */
+ if (! flag_pic)
+ fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
+
+ /* Call saved registers are saved just above the outgoing argument area. */
+ if (num_gfregs)
+ {
+ int offset, real_offset, n_regs;
+ char *base;
+
+ real_offset = -apparent_fsize;
+ offset = -apparent_fsize + frame_base_offset;
+ if (offset < -4096 || offset + num_gfregs * 4 > 4096)
+ {
+ /* ??? This might be optimized a little as %g1 might already have a
+ value close enough that a single add insn will do. */
+ /* ??? Although, all of this is probably only a temporary fix
+ because if %g1 can hold a function result, then
+ output_function_epilogue will lose (the result will get
+ clobbered). */
+ build_big_number (file, offset, "%g1");
+ fprintf (file, "\tadd %s,%%g1,%%g1\n", frame_base_name);
+ base = "%g1";
+ offset = 0;
+ }
+ else
+ {
+ base = frame_base_name;
+ }
+
+ n_regs = 0;
+ if (TARGET_EPILOGUE && ! leaf_function)
+ /* ??? Originally saved regs 0-15 here. */
+ n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
+ else if (leaf_function)
+ /* ??? Originally saved regs 0-31 here. */
+ n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
+ if (TARGET_EPILOGUE)
+ save_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs,
+ real_offset);
+ }
+
+ leaf_label = 0;
+ if (leaf_function && actual_fsize != 0)
+ {
+ /* warning ("leaf procedure with frame size %d", actual_fsize); */
+ if (! TARGET_EPILOGUE)
+ leaf_label = gen_label_rtx ();
+ }
+}
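+
+/* Sizing note for the prologue above: the immediate field of "save" and
+   "add" is a 13-bit signed value (-4096..4095), so frames up to 4096 bytes
+   fit in a single instruction (as -4096), frames up to 8192 bytes take two
+   -4096 steps, and anything larger builds the constant in %g1 via
+   build_big_number.  */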
+
+/* Output code for the function epilogue. */
+
+void
+output_function_epilogue (file, size, leaf_function)
+ FILE *file;
+ int size ATTRIBUTE_UNUSED;
+ int leaf_function;
+{
+ char *ret;
+
+ if (leaf_label)
+ {
+ emit_label_after (leaf_label, get_last_insn ());
+ final_scan_insn (get_last_insn (), file, 0, 0, 1);
+ }
+
+#ifdef FUNCTION_BLOCK_PROFILER_EXIT
+ else if (profile_block_flag == 2)
+ {
+ FUNCTION_BLOCK_PROFILER_EXIT(file);
+ }
+#endif
+
+ else if (current_function_epilogue_delay_list == 0)
+ {
+ /* If code does not drop into the epilogue, do nothing. */
+ rtx insn = get_last_insn ();
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == BARRIER)
+ return;
+ }
+
+ /* Restore any call saved registers. */
+ if (num_gfregs)
+ {
+ int offset, n_regs;
+ char *base;
+
+ offset = -apparent_fsize + frame_base_offset;
+ if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
+ {
+ build_big_number (file, offset, "%g1");
+ fprintf (file, "\tadd %s,%%g1,%%g1\n", frame_base_name);
+ base = "%g1";
+ offset = 0;
+ }
+ else
+ {
+ base = frame_base_name;
+ }
+
+ n_regs = 0;
+ if (TARGET_EPILOGUE && ! leaf_function)
+ /* ??? Originally saved regs 0-15 here. */
+ n_regs = restore_regs (file, 0, 8, base, offset, 0);
+ else if (leaf_function)
+ /* ??? Originally saved regs 0-31 here. */
+ n_regs = restore_regs (file, 0, 8, base, offset, 0);
+ if (TARGET_EPILOGUE)
+ restore_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs);
+ }
+
+ /* Work out how to skip the caller's unimp instruction if required. */
+ if (leaf_function)
+ ret = (SKIP_CALLERS_UNIMP_P ? "jmp %o7+12" : "retl");
+ else
+ ret = (SKIP_CALLERS_UNIMP_P ? "jmp %i7+12" : "ret");
+
+ if (TARGET_EPILOGUE || leaf_label)
+ {
+ int old_target_epilogue = TARGET_EPILOGUE;
+ target_flags &= ~old_target_epilogue;
+
+ if (! leaf_function)
+ {
+ /* If we wound up with things in our delay slot, flush them here. */
+ if (current_function_epilogue_delay_list)
+ {
+ rtx insn = emit_jump_insn_after (gen_rtx_RETURN (VOIDmode),
+ get_last_insn ());
+ PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ PATTERN (XEXP (current_function_epilogue_delay_list, 0)),
+ PATTERN (insn)));
+ final_scan_insn (insn, file, 1, 0, 1);
+ }
+ else if (TARGET_V9 && ! SKIP_CALLERS_UNIMP_P)
+ fputs ("\treturn %i7+8\n\tnop\n", file);
+ else
+ fprintf (file, "\t%s\n\trestore\n", ret);
+ }
+ /* All of the following cases are for leaf functions. */
+ else if (current_function_epilogue_delay_list)
+ {
+ /* eligible_for_epilogue_delay_slot ensures that if this is a
+ leaf function, then we will only have an insn in the delay slot
+ if the frame size is zero, so no stack adjustment is
+ needed here. */
+ if (actual_fsize != 0)
+ abort ();
+ fprintf (file, "\t%s\n", ret);
+ final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
+ file, 1, 0, 1);
+ }
+ /* Output 'nop' instead of 'sub %sp,-0,%sp' when there is no frame, so as
+ to avoid generating confusing assembly output. */
+ else if (actual_fsize == 0)
+ fprintf (file, "\t%s\n\tnop\n", ret);
+ else if (actual_fsize <= 4096)
+ fprintf (file, "\t%s\n\tsub %%sp,-%d,%%sp\n", ret, actual_fsize);
+ else if (actual_fsize <= 8192)
+ fprintf (file, "\tsub %%sp,-4096,%%sp\n\t%s\n\tsub %%sp,-%d,%%sp\n",
+ ret, actual_fsize - 4096);
+ else if ((actual_fsize & 0x3ff) == 0)
+ fprintf (file, "\tsethi %%hi(%d),%%g1\n\t%s\n\tadd %%sp,%%g1,%%sp\n",
+ actual_fsize, ret);
+ else
+ fprintf (file, "\tsethi %%hi(%d),%%g1\n\tor %%g1,%%lo(%d),%%g1\n\t%s\n\tadd %%sp,%%g1,%%sp\n",
+ actual_fsize, actual_fsize, ret);
+ target_flags |= old_target_epilogue;
+ }
+}
+
+/* Functions for handling argument passing.
+
+ For v8 the first six args are normally in registers and the rest are
+ pushed. Any arg that starts within the first 6 words is at least
+ partially passed in a register unless its data type forbids it.
+
+ For v9, the argument registers are laid out as an array of 16 elements
+ and arguments are added sequentially. The first 6 int args and up to the
+ first 16 fp args (depending on size) are passed in regs.
+
+ Slot Stack Integral Float Float in structure Double Long Double
+ ---- ----- -------- ----- ------------------ ------ -----------
+ 15 [SP+248] %f31 %f30,%f31 %d30
+ 14 [SP+240] %f29 %f28,%f29 %d28 %q28
+ 13 [SP+232] %f27 %f26,%f27 %d26
+ 12 [SP+224] %f25 %f24,%f25 %d24 %q24
+ 11 [SP+216] %f23 %f22,%f23 %d22
+ 10 [SP+208] %f21 %f20,%f21 %d20 %q20
+ 9 [SP+200] %f19 %f18,%f19 %d18
+ 8 [SP+192] %f17 %f16,%f17 %d16 %q16
+ 7 [SP+184] %f15 %f14,%f15 %d14
+ 6 [SP+176] %f13 %f12,%f13 %d12 %q12
+ 5 [SP+168] %o5 %f11 %f10,%f11 %d10
+ 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
+ 3 [SP+152] %o3 %f7 %f6,%f7 %d6
+ 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
+ 1 [SP+136] %o1 %f3 %f2,%f3 %d2
+ 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
+
+ Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
+
+ Integral arguments are always passed as 64 bit quantities appropriately
+ extended.
+
+ Passing of floating point values is handled as follows.
+ If a prototype is in scope:
+ If the value is a named argument (i.e. it is not part of the `...' of a
+ stdarg function) then the value is passed in the appropriate
+ fp reg.
+ If the value is part of the `...' and is passed in one of the first 6
+ slots then the value is passed in the appropriate int reg.
+ If the value is part of the `...' and is not passed in one of the first 6
+ slots then the value is passed in memory.
+ If a prototype is not in scope:
+ If the value is one of the first 6 arguments the value is passed in the
+ appropriate integer reg and the appropriate fp reg.
+ If the value is not one of the first 6 arguments the value is passed in
+ the appropriate fp reg and in memory.
+ */
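+
+/* Example of the v9 layout above: for a prototyped call f (int a, double b),
+   "a" occupies slot 0 and is passed in %o0, while "b" occupies slot 1 and is
+   passed in %d2.  Without a prototype in scope, "b" would be passed in both
+   %d2 and the matching integer register %o1.  */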
+
+/* Maximum number of int regs for args. */
+#define SPARC_INT_ARG_MAX 6
+/* Maximum number of fp regs for args. */
+#define SPARC_FP_ARG_MAX 16
+
+#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
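+
+/* For example, with UNITS_PER_WORD == 8, ROUND_ADVANCE (12) == 2: a 12-byte
+   argument advances the cumulative count by two word slots.  */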
+
+/* Handle the INIT_CUMULATIVE_ARGS macro.
+ Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+void
+init_cumulative_args (cum, fntype, libname, indirect)
+ CUMULATIVE_ARGS *cum;
+ tree fntype;
+ tree libname ATTRIBUTE_UNUSED;
+ int indirect ATTRIBUTE_UNUSED;
+{
+ cum->words = 0;
+ cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
+ cum->libcall_p = fntype == 0;
+}
+
+/* Compute the slot number to pass an argument in.
+ Returns the slot number or -1 if passing on the stack.
+
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+ INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
+ *PREGNO records the register number to use if scalar type.
+ *PPADDING records the amount of padding needed in words. */
+
+static int
+function_arg_slotno (cum, mode, type, named, incoming_p, pregno, ppadding)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+ int incoming_p;
+ int *pregno;
+ int *ppadding;
+{
+ int regbase = (incoming_p
+ ? SPARC_INCOMING_INT_ARG_FIRST
+ : SPARC_OUTGOING_INT_ARG_FIRST);
+ int slotno = cum->words;
+ int regno;
+
+ *ppadding = 0;
+
+ if (type != 0 && TREE_ADDRESSABLE (type))
+ return -1;
+ if (TARGET_ARCH32
+ && type != 0 && mode == BLKmode
+ && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
+ return -1;
+
+ switch (mode)
+ {
+ case VOIDmode :
+ /* MODE is VOIDmode when generating the actual call.
+ See emit_call_1. */
+ return -1;
+
+ case QImode : case CQImode :
+ case HImode : case CHImode :
+ case SImode : case CSImode :
+ case DImode : case CDImode :
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ break;
+
+ case SFmode : case SCmode :
+ case DFmode : case DCmode :
+ case TFmode : case TCmode :
+ if (TARGET_ARCH32)
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ else
+ {
+ if ((mode == TFmode || mode == TCmode)
+ && (slotno & 1) != 0)
+ slotno++, *ppadding = 1;
+ if (TARGET_FPU && named)
+ {
+ if (slotno >= SPARC_FP_ARG_MAX)
+ return -1;
+ regno = SPARC_FP_ARG_FIRST + slotno * 2;
+ if (mode == SFmode)
+ regno++;
+ }
+ else
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ }
+ break;
+
+ case BLKmode :
+ /* For sparc64, objects requiring 16-byte alignment get it. */
+ if (TARGET_ARCH64)
+ {
+ if (type && TYPE_ALIGN (type) == 128 && (slotno & 1) != 0)
+ slotno++, *ppadding = 1;
+ }
+
+ if (TARGET_ARCH32
+ || (type && TREE_CODE (type) == UNION_TYPE))
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ else
+ {
+ tree field;
+ int intregs_p = 0, fpregs_p = 0;
+ /* The ABI obviously doesn't specify how packed
+ structures are passed. These are defined to be passed
+ in int regs if possible, otherwise memory. */
+ int packed_p = 0;
+
+ /* First see what kinds of registers we need. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU)
+ fpregs_p = 1;
+ else
+ intregs_p = 1;
+ if (DECL_PACKED (field))
+ packed_p = 1;
+ }
+ }
+ if (packed_p || !named)
+ fpregs_p = 0, intregs_p = 1;
+
+ /* If all arg slots are filled, then must pass on stack. */
+ if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
+ return -1;
+ /* If there are only int args and all int arg slots are filled,
+ then must pass on stack. */
+ if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ /* Note that even if all int arg slots are filled, fp members may
+ still be passed in regs if such regs are available.
+ *PREGNO isn't set because there may be more than one; it's up
+ to the caller to compute them. */
+ return slotno;
+ }
+ break;
+
+ default :
+ abort ();
+ }
+
+ *pregno = regno;
+ return slotno;
+}
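+
+/* Slot-number example: a TFmode argument that would start in an odd slot is
+   bumped to the next even slot, so for f (int a, long double q) on v9, "a"
+   takes slot 0 (%o0) and "q" is padded up to slots 2-3 and, when passed in
+   fp registers, lands in %q4 -- consistent with the slot table above.  */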
+
+/* Handle recursive register counting for structure field layout. */
+
+struct function_arg_record_value_parms
+{
+ rtx ret;
+ int slotno, named, regbase;
+ int nregs, intoffset;
+};
+
+static void
+function_arg_record_value_1 (type, startbitpos, parms)
+ tree type;
+ int startbitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ tree field;
+
+ /* The ABI obviously doesn't specify how packed structures are
+ passed. These are defined to be passed in int regs if possible,
+ otherwise memory. */
+ int packed_p = 0;
+
+ /* We need to compute how many registers are needed so we can
+ allocate the PARALLEL but before we can do that we need to know
+ whether there are any packed fields. If there are, int regs are
+ used regardless of whether there are fp values present. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
+ {
+ packed_p = 1;
+ break;
+ }
+ }
+
+ /* Compute how many registers we need. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int bitpos = startbitpos;
+ if (DECL_FIELD_BITPOS (field))
+ bitpos += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
+ {
+ function_arg_record_value_1 (TREE_TYPE (field), bitpos, parms);
+ }
+ else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU
+ && ! packed_p
+ && parms->named)
+ {
+ if (parms->intoffset != -1)
+ {
+ int intslots, this_slotno;
+
+ intslots = (bitpos - parms->intoffset + BITS_PER_WORD - 1)
+ / BITS_PER_WORD;
+ this_slotno = parms->slotno + parms->intoffset
+ / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ intslots = MAX (intslots, 0);
+ parms->nregs += intslots;
+ parms->intoffset = -1;
+ }
+
+ /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
+ If it weren't true we wouldn't be here. */
+ parms->nregs += 1;
+ }
+ else
+ {
+ if (parms->intoffset == -1)
+ parms->intoffset = bitpos;
+ }
+ }
+ }
+}
+
+/* Handle recursive structure field register assignment. */
+
+static void
+function_arg_record_value_3 (bitpos, parms)
+ int bitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ enum machine_mode mode;
+ int regno, this_slotno, intslots, intoffset;
+ rtx reg;
+
+ if (parms->intoffset == -1)
+ return;
+ intoffset = parms->intoffset;
+ parms->intoffset = -1;
+
+ intslots = (bitpos - intoffset + BITS_PER_WORD - 1) / BITS_PER_WORD;
+ this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ if (intslots <= 0)
+ return;
+
+ /* If this is the trailing part of a word, only load that much into
+ the register. Otherwise load the whole register. Note that in
+ the latter case we may pick up unwanted bits. It's not a problem
+ at the moment, but we may wish to revisit this. */
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset%BITS_PER_WORD,
+ MODE_INT, 0);
+ }
+ else
+ mode = word_mode;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = parms->regbase + this_slotno;
+ reg = gen_rtx_REG (mode, regno);
+ XVECEXP (parms->ret, 0, parms->nregs)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_slotno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ parms->nregs += 1;
+ intslots -= 1;
+ }
+ while (intslots > 0);
+}
+
+static void
+function_arg_record_value_2 (type, startbitpos, parms)
+ tree type;
+ int startbitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ tree field;
+ int packed_p = 0;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
+ {
+ packed_p = 1;
+ break;
+ }
+ }
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int bitpos = startbitpos;
+ if (DECL_FIELD_BITPOS (field))
+ bitpos += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
+ {
+ function_arg_record_value_2 (TREE_TYPE (field), bitpos, parms);
+ }
+ else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU
+ && ! packed_p
+ && parms->named)
+ {
+ int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
+ rtx reg;
+
+ function_arg_record_value_3 (bitpos, parms);
+
+ reg = gen_rtx_REG (DECL_MODE (field),
+ (SPARC_FP_ARG_FIRST + this_slotno * 2
+ + (DECL_MODE (field) == SFmode
+ && (bitpos & 32) != 0)));
+ XVECEXP (parms->ret, 0, parms->nregs)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ parms->nregs += 1;
+ }
+ else
+ {
+ if (parms->intoffset == -1)
+ parms->intoffset = bitpos;
+ }
+ }
+ }
+}
+
+static rtx
+function_arg_record_value (type, mode, slotno, named, regbase)
+ tree type;
+ enum machine_mode mode;
+ int slotno, named, regbase;
+{
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ struct function_arg_record_value_parms parms;
+ int nregs;
+
+ parms.ret = NULL_RTX;
+ parms.slotno = slotno;
+ parms.named = named;
+ parms.regbase = regbase;
+
+ /* Compute how many registers we need. */
+ parms.nregs = 0;
+ parms.intoffset = 0;
+ function_arg_record_value_1 (type, 0, &parms);
+
+ if (parms.intoffset != -1)
+ {
+ int intslots, this_slotno;
+
+ intslots = (typesize*BITS_PER_UNIT - parms.intoffset + BITS_PER_WORD - 1)
+ / BITS_PER_WORD;
+ this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ intslots = MAX (intslots, 0);
+
+ parms.nregs += intslots;
+ }
+ nregs = parms.nregs;
+
+ /* Allocate the vector and handle some annoying special cases. */
+ if (nregs == 0)
+ {
+ /* ??? Empty structure has no value? Duh? */
+ if (typesize <= 0)
+ {
+ /* Though there's nothing really to store, return a word register
+ anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
+ leads to breakage because there are zero bytes to
+ load. */
+ return gen_rtx_REG (mode, regbase);
+ }
+ else
+ {
+ /* ??? C++ has structures with no fields, and yet a size. Give up
+ for now and pass everything back in integer registers. */
+ nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ }
+ if (nregs + slotno > SPARC_INT_ARG_MAX)
+ nregs = SPARC_INT_ARG_MAX - slotno;
+ }
+ if (nregs == 0)
+ abort();
+
+ parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+
+ /* Fill in the entries. */
+ parms.nregs = 0;
+ parms.intoffset = 0;
+ function_arg_record_value_2 (type, 0, &parms);
+ function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
+
+ if (parms.nregs != nregs)
+ abort ();
+
+ return parms.ret;
+}
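+
+/* Illustrative trace of the machinery above: on v9, a named argument of type
+   struct { int a; double b; } starting in slot 0 gives nregs == 2 and a
+   PARALLEL containing (%o0 at byte offset 0) for the word holding "a" and
+   (%d2 at byte offset 8) for "b", matching the argument slot table earlier
+   in this file.  */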
+
+/* Handle the FUNCTION_ARG macro.
+ Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+ INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
+
+rtx
+function_arg (cum, mode, type, named, incoming_p)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+ int incoming_p;
+{
+ int regbase = (incoming_p
+ ? SPARC_INCOMING_INT_ARG_FIRST
+ : SPARC_OUTGOING_INT_ARG_FIRST);
+ int slotno, regno, padding;
+ rtx reg;
+
+ slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
+ &regno, &padding);
+
+ if (slotno == -1)
+ return 0;
+
+ if (TARGET_ARCH32)
+ {
+ reg = gen_rtx_REG (mode, regno);
+ return reg;
+ }
+
+ /* v9 fp args in reg slots beyond the int reg slots get passed in regs
+ but also have the slot allocated for them.
+ If no prototype is in scope, fp values in register slots are passed
+ in two places: either fp regs and int regs, or fp regs and memory. */
+ if ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ && SPARC_FP_REG_P (regno))
+ {
+ reg = gen_rtx_REG (mode, regno);
+ if (cum->prototype_p || cum->libcall_p)
+ {
+ /* "* 2" because fp reg numbers are recorded in 4 byte
+ quantities. */
+#if 0
+ /* ??? This will cause the value to be passed in the fp reg and
+ in the stack. When a prototype exists we want to pass the
+ value in the reg but reserve space on the stack. That's an
+ optimization, and is deferred [for a bit]. */
+ if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ NULL_RTX, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ reg, const0_rtx)));
+ else
+#else
+ /* ??? It seems that passing back a register even when past
+ the area declared by REG_PARM_STACK_SPACE will allocate
+ space appropriately, and will not copy the data onto the
+ stack, exactly as we desire.
+
+ This is due to locate_and_pad_parm being called in
+ expand_call whenever reg_parm_stack_space > 0, which
+ while beneficial to our example here, seems to differ
+ from what had been intended. Ho hum... -- r~ */
+#endif
+ return reg;
+ }
+ else
+ {
+ rtx v0, v1;
+
+ if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
+ {
+ int intreg;
+
+ /* On incoming, we don't need to know that the value
+ is passed in %f0 and %i0; knowing so confuses other parts
+ of the compiler, causing needless spillage even in the simplest cases. */
+ if (incoming_p)
+ return reg;
+
+ intreg = (SPARC_OUTGOING_INT_ARG_FIRST
+ + (regno - SPARC_FP_ARG_FIRST) / 2);
+
+ v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+ v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
+ const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
+ }
+ else
+ {
+ v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
+ }
+ }
+ }
+ else if (type && TREE_CODE (type) == RECORD_TYPE)
+ {
+ /* Structures up to 16 bytes in size are passed in arg slots on the
+ stack and are promoted to registers where possible. */
+
+ if (int_size_in_bytes (type) > 16)
+ abort (); /* shouldn't get here */
+
+ return function_arg_record_value (type, mode, slotno, named, regbase);
+ }
+ else if (type && TREE_CODE (type) == UNION_TYPE)
+ {
+ enum machine_mode mode;
+ int bytes = int_size_in_bytes (type);
+
+ if (bytes > 16)
+ abort ();
+
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
+ reg = gen_rtx_REG (mode, regno);
+ }
+ else
+ {
+ /* Scalar or complex int. */
+ reg = gen_rtx_REG (mode, regno);
+ }
+
+ return reg;
+}
+
+/* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
+ For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero.
+
+ Any arg that starts in the first 6 regs but won't entirely fit in them
+ needs partial registers on v8. On v9, structures with integer
+ values in arg slots 5,6 will be passed in %o5 and SP+176, and complex fp
+ values that begin in the last fp reg [where "last fp reg" varies with the
+ mode] will be split between that reg and memory. */
+
+int
+function_arg_partial_nregs (cum, mode, type, named)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int slotno, regno, padding;
+
+ /* We pass 0 for incoming_p here, it doesn't matter. */
+ slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
+
+ if (slotno == -1)
+ return 0;
+
+ if (TARGET_ARCH32)
+ {
+ if ((slotno + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
+ > NPARM_REGS (SImode))
+ return NPARM_REGS (SImode) - slotno;
+ return 0;
+ }
+ else
+ {
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ int size = int_size_in_bytes (type);
+ int align = TYPE_ALIGN (type);
+
+ if (align == 16)
+ slotno += slotno & 1;
+ if (size > 8 && size <= 16
+ && slotno == SPARC_INT_ARG_MAX - 1)
+ return 1;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
+ || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ && ! TARGET_FPU))
+ {
+ if (GET_MODE_ALIGNMENT (mode) == 128)
+ {
+ slotno += slotno & 1;
+ if (slotno == SPARC_INT_ARG_MAX - 2)
+ return 1;
+ }
+ else
+ {
+ if (slotno == SPARC_INT_ARG_MAX - 1)
+ return 1;
+ }
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ if (GET_MODE_ALIGNMENT (mode) == 128)
+ slotno += slotno & 1;
+ if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
+ > SPARC_FP_ARG_MAX)
+ return 1;
+ }
+ return 0;
+ }
+}
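+
+/* Example: on v9 a 12-byte structure whose first word lands in slot 5
+   returns 1 here -- the first half goes in %o5 and the remainder spills to
+   the stack, as described in the comment above.  */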
+
+/* Handle the FUNCTION_ARG_PASS_BY_REFERENCE macro.
+ !v9: The SPARC ABI stipulates passing struct arguments (of any size) and
+ quad-precision floats by invisible reference.
+ v9: Aggregates greater than 16 bytes are passed by reference.
+ For Pascal, also pass arrays by reference. */
+
+int
+function_arg_pass_by_reference (cum, mode, type, named)
+ const CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
+ enum machine_mode mode;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ if (TARGET_ARCH32)
+ {
+ return ((type && AGGREGATE_TYPE_P (type))
+ || mode == TFmode || mode == TCmode);
+ }
+ else
+ {
+ return ((type && TREE_CODE (type) == ARRAY_TYPE)
+ /* Consider complex values as aggregates, so care for TCmode. */
+ || GET_MODE_SIZE (mode) > 16
+ || (type && AGGREGATE_TYPE_P (type)
+ && int_size_in_bytes (type) > 16));
+ }
+}
+
+/* Handle the FUNCTION_ARG_ADVANCE macro.
+ Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ TYPE is null for libcalls where that information may not be available. */
+
+void
+function_arg_advance (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int slotno, regno, padding;
+
+ /* We pass 0 for incoming_p here, it doesn't matter. */
+ slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
+
+ /* If the register required leading padding, add it. */
+ if (slotno != -1)
+ cum->words += padding;
+
+ if (TARGET_ARCH32)
+ {
+ cum->words += (mode != BLKmode
+ ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
+ : ROUND_ADVANCE (int_size_in_bytes (type)));
+ }
+ else
+ {
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ int size = int_size_in_bytes (type);
+
+ if (size <= 8)
+ ++cum->words;
+ else if (size <= 16)
+ cum->words += 2;
+ else /* passed by reference */
+ ++cum->words;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ {
+ cum->words += 2;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ cum->words += GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ }
+ else
+ {
+ cum->words += (mode != BLKmode
+ ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
+ : ROUND_ADVANCE (int_size_in_bytes (type)));
+ }
+ }
+}
+
+/* Handle the FUNCTION_ARG_PADDING macro.
+ For the 64 bit ABI structs are always stored left shifted in their
+ argument slot. */
+
+enum direction
+function_arg_padding (mode, type)
+ enum machine_mode mode;
+ tree type;
+{
+ if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
+ return upward;
+
+ /* This is the default definition. */
+ return (! BYTES_BIG_ENDIAN
+ ? upward
+ : ((mode == BLKmode
+ ? (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT))
+ : GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
+ ? downward : upward));
+}
+
+/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
+ For v9, function return values are subject to the same rules as arguments,
+ except that up to 32 bytes may be returned in registers. */
+
+rtx
+function_value (type, mode, incoming_p)
+ tree type;
+ enum machine_mode mode;
+ int incoming_p;
+{
+ int regno;
+ int regbase = (incoming_p
+ ? SPARC_OUTGOING_INT_ARG_FIRST
+ : SPARC_INCOMING_INT_ARG_FIRST);
+
+ if (TARGET_ARCH64 && type)
+ {
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ /* Structures up to 32 bytes in size are passed in registers,
+ promoted to fp registers where possible. */
+
+ if (int_size_in_bytes (type) > 32)
+ abort (); /* shouldn't get here */
+
+ return function_arg_record_value (type, mode, 0, 1, regbase);
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ int bytes = int_size_in_bytes (type);
+
+ if (bytes > 32)
+ abort ();
+
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ if (incoming_p)
+ regno = BASE_RETURN_VALUE_REG (mode);
+ else
+ regno = BASE_OUTGOING_VALUE_REG (mode);
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Do what is necessary for `va_start'. The argument is ignored.
+
+ We look at the current function to determine if stdarg or varargs
+ is used and return the address of the first unnamed parameter. */
+
+rtx
+sparc_builtin_saveregs (arglist)
+ tree arglist ATTRIBUTE_UNUSED;
+{
+ int first_reg = current_function_args_info.words;
+ rtx address;
+ int regno;
+
+ for (regno = first_reg; regno < NPARM_REGS (word_mode); regno++)
+ emit_move_insn (gen_rtx_MEM (word_mode,
+ gen_rtx_PLUS (Pmode,
+ frame_pointer_rtx,
+ GEN_INT (STACK_POINTER_OFFSET
+ + UNITS_PER_WORD * regno))),
+ gen_rtx_REG (word_mode,
+ BASE_INCOMING_ARG_REG (word_mode) + regno));
+
+ address = gen_rtx_PLUS (Pmode,
+ frame_pointer_rtx,
+ GEN_INT (STACK_POINTER_OFFSET
+ + UNITS_PER_WORD * first_reg));
+
+ if (flag_check_memory_usage
+ && first_reg < NPARM_REGS (word_mode))
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ address, ptr_mode,
+ GEN_INT (UNITS_PER_WORD
+ * (NPARM_REGS (word_mode) - first_reg)),
+ TYPE_MODE (sizetype), GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ return address;
+}
+
+/* Return the string to output a conditional branch to LABEL, which is
+ the operand number of the label. OP is the conditional expression.
+ XEXP (OP, 0) is assumed to be a condition code register (integer or
+ floating point) and its mode specifies what kind of comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ ANNUL is non-zero if we should generate an annulling branch.
+
+ NOOP is non-zero if we have to follow this branch by a noop.
+
+ INSN, if set, is the insn. */
+
+char *
+output_cbranch (op, label, reversed, annul, noop, insn)
+ rtx op;
+ int label;
+ int reversed, annul, noop;
+ rtx insn;
+{
+ static char string[20];
+ enum rtx_code code = GET_CODE (op);
+ rtx cc_reg = XEXP (op, 0);
+ enum machine_mode mode = GET_MODE (cc_reg);
+ static char v8_labelno[] = " %lX";
+ static char v9_icc_labelno[] = " %%icc,%lX";
+ static char v9_xcc_labelno[] = " %%xcc,%lX";
+ static char v9_fcc_labelno[] = " %%fccX,%lY";
+ char *labelno;
+ int labeloff;
+
+ /* ??? !v9: FP branches cannot be preceded by another floating point insn.
+ Because there is currently no concept of pre-delay slots, we can fix
+ this only by always emitting a nop before a floating point branch. */
+
+ if ((mode == CCFPmode || mode == CCFPEmode) && ! TARGET_V9)
+ strcpy (string, "nop\n\t");
+ else
+ string[0] = '\0';
+
+ /* If not floating-point or if EQ or NE, we can just reverse the code. */
+ if (reversed
+ && ((mode != CCFPmode && mode != CCFPEmode) || code == EQ || code == NE))
+ code = reverse_condition (code), reversed = 0;
+
+ /* Start by writing the branch condition. */
+ switch (code)
+ {
+ case NE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ strcat (string, "fbne");
+ else
+ strcpy (string, "bne");
+ break;
+
+ case EQ:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ strcat (string, "fbe");
+ else
+ strcpy (string, "be");
+ break;
+
+ case GE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbul");
+ else
+ strcat (string, "fbge");
+ }
+ else if (mode == CC_NOOVmode)
+ strcpy (string, "bpos");
+ else
+ strcpy (string, "bge");
+ break;
+
+ case GT:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbule");
+ else
+ strcat (string, "fbg");
+ }
+ else
+ strcpy (string, "bg");
+ break;
+
+ case LE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbug");
+ else
+ strcat (string, "fble");
+ }
+ else
+ strcpy (string, "ble");
+ break;
+
+ case LT:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbuge");
+ else
+ strcat (string, "fbl");
+ }
+ else if (mode == CC_NOOVmode)
+ strcpy (string, "bneg");
+ else
+ strcpy (string, "bl");
+ break;
+
+ case GEU:
+ strcpy (string, "bgeu");
+ break;
+
+ case GTU:
+ strcpy (string, "bgu");
+ break;
+
+ case LEU:
+ strcpy (string, "bleu");
+ break;
+
+ case LTU:
+ strcpy (string, "blu");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Now add the annulling, the label, and a possible noop. */
+ if (annul)
+ strcat (string, ",a");
+
+ if (! TARGET_V9)
+ {
+ labeloff = 3;
+ labelno = v8_labelno;
+ }
+ else
+ {
+ rtx note;
+
+ if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
+ strcat (string, INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
+
+ labeloff = 9;
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ labeloff = 10;
+ labelno = v9_fcc_labelno;
+ /* Set the char indicating the number of the fcc reg to use. */
+ labelno[6] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
+ }
+ else if (mode == CCXmode || mode == CCX_NOOVmode)
+ labelno = v9_xcc_labelno;
+ else
+ labelno = v9_icc_labelno;
+ }
+ /* Set the char indicating the number of the operand containing the
+ label_ref. */
+ labelno[labeloff] = label + '0';
+ strcat (string, labelno);
+
+ if (noop)
+ strcat (string, "\n\tnop");
+
+ return string;
+}
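+
+/* Example of the templates built above: a GT test on %xcc with the annul bit
+   and a "likely" branch prediction note, targeting operand 1, yields
+   "bg,a,pt %%xcc,%l1", which is printed as "bg,a,pt %xcc,<label>" once the
+   operands are substituted.  */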
+
+/* Return the string to output a conditional branch to LABEL, testing
+ register REG. LABEL is the operand number of the label; REG is the
+ operand number of the reg. OP is the conditional expression. The mode
+ of REG says what kind of comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ ANNUL is non-zero if we should generate an annulling branch.
+
+ NOOP is non-zero if we have to follow this branch by a noop. */
+
+char *
+output_v9branch (op, reg, label, reversed, annul, noop)
+ rtx op;
+ int reg, label;
+ int reversed, annul, noop;
+{
+ static char string[20];
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode mode = GET_MODE (XEXP (op, 0));
+ static char labelno[] = " %X,%lX";
+
+ /* If not floating-point or if EQ or NE, we can just reverse the code. */
+ if (reversed)
+ code = reverse_condition (code), reversed = 0;
+
+ /* Only 64 bit versions of these instructions exist. */
+ if (mode != DImode)
+ abort ();
+
+ /* Start by writing the branch condition. */
+
+ switch (code)
+ {
+ case NE:
+ strcpy (string, "brnz");
+ break;
+
+ case EQ:
+ strcpy (string, "brz");
+ break;
+
+ case GE:
+ strcpy (string, "brgez");
+ break;
+
+ case LT:
+ strcpy (string, "brlz");
+ break;
+
+ case LE:
+ strcpy (string, "brlez");
+ break;
+
+ case GT:
+ strcpy (string, "brgz");
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Now add the annulling, reg, label, and nop. */
+ if (annul)
+ strcat (string, ",a");
+
+ /* ??? Optional prediction bit ",pt" or ",pn" goes here. */
+
+ labelno[2] = reg + '0';
+ labelno[6] = label + '0';
+ strcat (string, labelno);
+
+ if (noop)
+ strcat (string, "\n\tnop");
+
+ return string;
+}
+
+/* Renumber registers in a delay slot. Replace the registers instead of
+ renumbering in place, because the rtl may be shared.
+
+ This does not handle instructions other than moves. */
+
+static void
+epilogue_renumber (where)
+ rtx *where;
+{
+ rtx x = *where;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case MEM:
+ *where = x = copy_rtx (x);
+ epilogue_renumber (&XEXP (x, 0));
+ return;
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ if (regno > 8 && regno < 24)
+ abort ();
+ if (regno >= 24 && regno < 32)
+ *where = gen_rtx_REG (GET_MODE (x), regno - 16);
+ return;
+ }
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case IOR:
+ case AND:
+ case XOR:
+ case PLUS:
+ case MINUS:
+ epilogue_renumber (&XEXP (x, 1));
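+      /* Fall through: binary operators also renumber operand 0 below.  */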
+ case NEG:
+ case NOT:
+ epilogue_renumber (&XEXP (x, 0));
+ return;
+
+ default:
+ debug_rtx (*where);
+ abort();
+ }
+}
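+
+/* Example: the delay slot of the v9 "return" instruction executes after the
+   register window has been restored, so a delay insn referring to %i0
+   (register 24) in the current frame must be rewritten to use %o0
+   (register 8) -- hence the "regno - 16" mapping above.  */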
+
+/* Output assembler code to return from a function. */
+
+char *
+output_return (operands)
+ rtx *operands;
+{
+ rtx delay = final_sequence ? XVECEXP (final_sequence, 0, 1) : 0;
+
+ if (leaf_label)
+ {
+ operands[0] = leaf_label;
+ return "b%* %l0%(";
+ }
+ else if (leaf_function)
+ {
+ /* No delay slot in a leaf function. */
+ if (delay)
+ abort ();
+
+ /* If we didn't allocate a frame pointer for the current function,
+ the stack pointer might have been adjusted. Output code to
+ restore it now. */
+
+ operands[0] = GEN_INT (actual_fsize);
+
+ /* Use sub of negated value in first two cases instead of add to
+ allow actual_fsize == 4096. */
+
+ if (actual_fsize <= 4096)
+ {
+ if (SKIP_CALLERS_UNIMP_P)
+ return "jmp %%o7+12\n\tsub %%sp,-%0,%%sp";
+ else
+ return "retl\n\tsub %%sp,-%0,%%sp";
+ }
+ else if (actual_fsize <= 8192)
+ {
+ operands[0] = GEN_INT (actual_fsize - 4096);
+ if (SKIP_CALLERS_UNIMP_P)
+ return "sub %%sp,-4096,%%sp\n\tjmp %%o7+12\n\tsub %%sp,-%0,%%sp";
+ else
+ return "sub %%sp,-4096,%%sp\n\tretl\n\tsub %%sp,-%0,%%sp";
+ }
+ else if (SKIP_CALLERS_UNIMP_P)
+ {
+ if ((actual_fsize & 0x3ff) != 0)
+ return "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tjmp %%o7+12\n\tadd %%sp,%%g1,%%sp";
+ else
+ return "sethi %%hi(%a0),%%g1\n\tjmp %%o7+12\n\tadd %%sp,%%g1,%%sp";
+ }
+ else
+ {
+ if ((actual_fsize & 0x3ff) != 0)
+ return "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
+ else
+ return "sethi %%hi(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
+ }
+ }
+ else if (TARGET_V9)
+ {
+ if (delay)
+ {
+ epilogue_renumber (&SET_DEST (PATTERN (delay)));
+ epilogue_renumber (&SET_SRC (PATTERN (delay)));
+ }
+ if (SKIP_CALLERS_UNIMP_P)
+ return "return %%i7+12%#";
+ else
+ return "return %%i7+8%#";
+ }
+ else
+ {
+ if (delay)
+ abort ();
+ if (SKIP_CALLERS_UNIMP_P)
+ return "jmp %%i7+12\n\trestore";
+ else
+ return "ret\n\trestore";
+ }
+}
+
+/* Leaf functions and non-leaf functions have different needs. */
+
+static int
+reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
+
+static int
+reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
+
+static int *reg_alloc_orders[] = {
+ reg_leaf_alloc_order,
+ reg_nonleaf_alloc_order};
+
+void
+order_regs_for_local_alloc ()
+{
+ static int last_order_nonleaf = 1;
+
+ if (regs_ever_live[15] != last_order_nonleaf)
+ {
+ last_order_nonleaf = !last_order_nonleaf;
+ bcopy ((char *) reg_alloc_orders[last_order_nonleaf],
+ (char *) reg_alloc_order, FIRST_PSEUDO_REGISTER * sizeof (int));
+ }
+}
+
+/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
+ This makes them candidates for using ldd and std insns.
+
+ Note reg1 and reg2 *must* be hard registers. */
+
+int
+registers_ok_for_ldd_peep (reg1, reg2)
+ rtx reg1, reg2;
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ if (REGNO (reg1) % 2 != 0)
+ return 0;
+
+ /* Integer ldd is deprecated in SPARC V9. */
+ if (TARGET_V9 && REGNO (reg1) < 32)
+ return 0;
+
+ return (REGNO (reg1) == REGNO (reg2) - 1);
+}
+
+/* Return 1 if addr1 and addr2 are suitable for use in an ldd or
+ std insn.
+
+ This can only happen when addr1 and addr2 are consecutive memory
+ locations (addr1 + 4 == addr2). addr1 must also be aligned on a
+ 64 bit boundary (addr1 % 8 == 0).
+
+ We know %sp and %fp are kept aligned on a 64 bit boundary. Other
+ registers are assumed to *never* be properly aligned and are
+ rejected.
+
+ Since %sp and %fp are kept 64 bit aligned, we need only check
+ that addr1's offset is a multiple of 8. */
+
+int
+addrs_ok_for_ldd_peep (addr1, addr2)
+ rtx addr1, addr2;
+{
+ int reg1, offset1;
+
+ /* Extract a register number and offset (if used) from the first addr. */
+ if (GET_CODE (addr1) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr1, 0)) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (XEXP (addr1, 0));
+ /* The offset must be constant! */
+ if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
+ return 0;
+ offset1 = INTVAL (XEXP (addr1, 1));
+ }
+ }
+ else if (GET_CODE (addr1) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (addr1);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset1 = 0;
+ }
+
+ /* Make sure the second address has the form (plus (reg) (const_int)). */
+ if (GET_CODE (addr2) != PLUS)
+ return 0;
+
+ if (GET_CODE (XEXP (addr2, 0)) != REG
+ || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+
+ /* Only %fp and %sp are allowed. Additionally both addresses must
+ use the same register. */
+ if (reg1 != FRAME_POINTER_REGNUM && reg1 != STACK_POINTER_REGNUM)
+ return 0;
+
+ if (reg1 != REGNO (XEXP (addr2, 0)))
+ return 0;
+
+ /* The first offset must be evenly divisible by 8 to ensure the
+ address is 64 bit aligned. */
+ if (offset1 % 8 != 0)
+ return 0;
+
+ /* The offset for the second addr must be 4 more than the first addr. */
+ if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
+ return 0;
+
+ /* All the tests passed. addr1 and addr2 are valid for ldd and std
+ instructions. */
+ return 1;
+}
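+
+/* Examples for the checks above: [%fp-8] followed by [%fp-4] is accepted
+   (same base register, first offset a multiple of 8, second offset exactly 4
+   more), while [%fp-4]/[%fp+0] fails the alignment test and [%o0]/[%o0+4]
+   fails because only %sp and %fp are trusted to be 8-byte aligned.  */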
+
+/* Return 1 if reg is a pseudo, or is the first register in
+ a hard register pair. This makes it a candidate for use in
+ ldd and std insns. */
+
+int
+register_ok_for_ldd (reg)
+ rtx reg;
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ return (REGNO (reg) % 2 == 0);
+ else
+ return 1;
+}
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '#':
+ /* Output a 'nop' if there's nothing for the delay slot. */
+ if (dbr_sequence_length () == 0)
+ fputs ("\n\tnop", file);
+ return;
+ case '*':
+ /* Output an annul flag if there's nothing for the delay slot and we
+ are optimizing. This is always used with '(' below. */
+ /* Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
+ this is a dbx bug. So, we only do this when optimizing. */
+ /* On UltraSPARC, a branch in a delay slot causes a pipeline flush.
+ Always emit a nop in case the next instruction is a branch. */
+ if (dbr_sequence_length () == 0
+ && (optimize && (int)sparc_cpu < PROCESSOR_V9))
+ fputs (",a", file);
+ return;
+ case '(':
+ /* Output a 'nop' if there's nothing for the delay slot and we are
+ not optimizing. This is always used with '*' above. */
+ if (dbr_sequence_length () == 0
+ && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
+ fputs ("\n\tnop", file);
+ return;
+ case '_':
+ /* Output the Embedded Medium/Anywhere code model base register. */
+ fputs (EMBMEDANY_BASE_REG, file);
+ return;
+ case '@':
+ /* Print out what we are using as the frame pointer. This might
+ be %fp, or might be %sp+offset. */
+ /* ??? What if offset is too big? Perhaps the caller knows it isn't? */
+ fprintf (file, "%s+%d", frame_base_name, frame_base_offset);
+ return;
+ case 'Y':
+ /* Adjust the operand to take into account a RESTORE operation. */
+ if (GET_CODE (x) == CONST_INT)
+ break;
+ else if (GET_CODE (x) != REG)
+ output_operand_lossage ("Invalid %%Y operand");
+ else if (REGNO (x) < 8)
+ fputs (reg_names[REGNO (x)], file);
+ else if (REGNO (x) >= 24 && REGNO (x) < 32)
+ fputs (reg_names[REGNO (x)-16], file);
+ else
+ output_operand_lossage ("Invalid %%Y operand");
+ return;
+ case 'L':
+ /* Print out the low order register name of a register pair. */
+ if (WORDS_BIG_ENDIAN)
+ fputs (reg_names[REGNO (x)+1], file);
+ else
+ fputs (reg_names[REGNO (x)], file);
+ return;
+ case 'H':
+ /* Print out the high order register name of a register pair. */
+ if (WORDS_BIG_ENDIAN)
+ fputs (reg_names[REGNO (x)], file);
+ else
+ fputs (reg_names[REGNO (x)+1], file);
+ return;
+ case 'R':
+ /* Print out the second register name of a register pair or quad.
+ I.e., R (%o0) => %o1. */
+ fputs (reg_names[REGNO (x)+1], file);
+ return;
+ case 'S':
+ /* Print out the third register name of a register quad.
+ I.e., S (%o0) => %o2. */
+ fputs (reg_names[REGNO (x)+2], file);
+ return;
+ case 'T':
+ /* Print out the fourth register name of a register quad.
+ I.e., T (%o0) => %o3. */
+ fputs (reg_names[REGNO (x)+3], file);
+ return;
+ case 'x':
+ /* Print a condition code register. */
+ if (REGNO (x) == SPARC_ICC_REG)
+ {
+ /* We don't handle CC[X]_NOOVmode because they're not supposed
+ to occur here. */
+ if (GET_MODE (x) == CCmode)
+ fputs ("%icc", file);
+ else if (GET_MODE (x) == CCXmode)
+ fputs ("%xcc", file);
+ else
+ abort ();
+ }
+ else
+ /* %fccN register */
+ fputs (reg_names[REGNO (x)], file);
+ return;
+ case 'm':
+ /* Print the operand's address only. */
+ output_address (XEXP (x, 0));
+ return;
+ case 'r':
+ /* In this case we need a register. Use %g0 if the
+ operand is const0_rtx. */
+ if (x == const0_rtx
+ || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
+ {
+ fputs ("%g0", file);
+ return;
+ }
+ else
+ break;
+
+ case 'A':
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("or", file); break;
+ case AND: fputs ("and", file); break;
+ case XOR: fputs ("xor", file); break;
+ default: output_operand_lossage ("Invalid %%A operand");
+ }
+ return;
+
+ case 'B':
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("orn", file); break;
+ case AND: fputs ("andn", file); break;
+ case XOR: fputs ("xnor", file); break;
+ default: output_operand_lossage ("Invalid %%B operand");
+ }
+ return;
+
+ /* These are used by the conditional move instructions. */
+ case 'c' :
+ case 'C':
+ {
+ enum rtx_code rc = (code == 'c'
+ ? reverse_condition (GET_CODE (x))
+ : GET_CODE (x));
+ switch (rc)
+ {
+ case NE: fputs ("ne", file); break;
+ case EQ: fputs ("e", file); break;
+ case GE: fputs ("ge", file); break;
+ case GT: fputs ("g", file); break;
+ case LE: fputs ("le", file); break;
+ case LT: fputs ("l", file); break;
+ case GEU: fputs ("geu", file); break;
+ case GTU: fputs ("gu", file); break;
+ case LEU: fputs ("leu", file); break;
+ case LTU: fputs ("lu", file); break;
+ default: output_operand_lossage (code == 'c'
+ ? "Invalid %%c operand"
+ : "Invalid %%C operand");
+ }
+ return;
+ }
+
+ /* These are used by the movr instruction pattern. */
+ case 'd':
+ case 'D':
+ {
+ enum rtx_code rc = (code == 'd'
+ ? reverse_condition (GET_CODE (x))
+ : GET_CODE (x));
+ switch (rc)
+ {
+ case NE: fputs ("ne", file); break;
+ case EQ: fputs ("e", file); break;
+ case GE: fputs ("gez", file); break;
+ case LT: fputs ("lz", file); break;
+ case LE: fputs ("lez", file); break;
+ case GT: fputs ("gz", file); break;
+ default: output_operand_lossage (code == 'd'
+ ? "Invalid %%d operand"
+ : "Invalid %%D operand");
+ }
+ return;
+ }
+
+ case 'b':
+ {
+ /* Print a sign-extended character. */
+ int i = INTVAL (x) & 0xff;
+ if (i & 0x80)
+ i |= 0xffffff00;
+ fprintf (file, "%d", i);
+ return;
+ }
+
+ case 'f':
+ /* Operand must be a MEM; write its address. */
+ if (GET_CODE (x) != MEM)
+ output_operand_lossage ("Invalid %%f operand");
+ output_address (XEXP (x, 0));
+ return;
+
+ case 0:
+ /* Do nothing special. */
+ break;
+
+ default:
+ /* Undocumented flag. */
+ output_operand_lossage ("invalid operand output code");
+ }
+
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ fputc ('[', file);
+ /* Poor Sun assembler doesn't understand absolute addressing. */
+ if (CONSTANT_P (XEXP (x, 0))
+ && ! TARGET_LIVE_G0)
+ fputs ("%g0+", file);
+ output_address (XEXP (x, 0));
+ fputc (']', file);
+ }
+ else if (GET_CODE (x) == HIGH)
+ {
+ fputs ("%hi(", file);
+ output_addr_const (file, XEXP (x, 0));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == LO_SUM)
+ {
+ print_operand (file, XEXP (x, 0), 0);
+ fputs ("+%lo(", file);
+ output_addr_const (file, XEXP (x, 1));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == VOIDmode
+ || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
+ {
+ if (CONST_DOUBLE_HIGH (x) == 0)
+ fprintf (file, "%u", CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_HIGH (x) == -1
+ && CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, "%d", CONST_DOUBLE_LOW (x));
+ else
+ output_operand_lossage ("long long constant not a valid immediate operand");
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ output_operand_lossage ("floating point constant not a valid immediate operand");
+ else { output_addr_const (file, x); }
+}
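+
+/* For example, on a big endian configuration, if operand 0 is the
+   register pair %o0/%o1 holding a DImode value, print_operand emits
+   %o0 for %H0, %o1 for %L0, and %o1 for %R0 (the 'R' code ignores
+   endianness).  */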
+
+/* This function outputs assembler code for VALUE to FILE, where VALUE is
+ a 64 bit (DImode) value. */
+
+/* ??? If there is a 64 bit counterpart to .word that the assembler
+   understands, then using that would simplify this code greatly.  */
+/* ??? We only output .xword's for symbols and only then in environments
+ where the assembler can handle them. */
+
+void
+output_double_int (file, value)
+ FILE *file;
+ rtx value;
+{
+ if (GET_CODE (value) == CONST_INT)
+ {
+ /* ??? This has endianness issues. */
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT xword = INTVAL (value);
+ HOST_WIDE_INT high, low;
+
+ high = (xword >> 32) & 0xffffffff;
+ low = xword & 0xffffffff;
+ ASM_OUTPUT_INT (file, GEN_INT (high));
+ ASM_OUTPUT_INT (file, GEN_INT (low));
+#else
+ if (INTVAL (value) < 0)
+ ASM_OUTPUT_INT (file, constm1_rtx);
+ else
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+#endif
+ }
+ else if (GET_CODE (value) == CONST_DOUBLE)
+ {
+ ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_HIGH (value)));
+ ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_LOW (value)));
+ }
+ else if (GET_CODE (value) == SYMBOL_REF
+ || GET_CODE (value) == CONST
+ || GET_CODE (value) == PLUS
+ || (TARGET_ARCH64 &&
+ (GET_CODE (value) == LABEL_REF
+ || GET_CODE (value) == CODE_LABEL
+ || GET_CODE (value) == MINUS)))
+ {
+ if (!TARGET_V9 || TARGET_CM_MEDLOW)
+ {
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+ }
+ else
+ {
+ fprintf (file, "\t%s\t", ASM_LONGLONG);
+ output_addr_const (file, value);
+ fprintf (file, "\n");
+ }
+ }
+ else
+ abort ();
+}
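+
+/* For example, on a host where HOST_BITS_PER_WIDE_INT is not 64,
+   output_double_int emits a negative CONST_INT such as -5 as two
+   ASM_OUTPUT_INT directives: the sign word (-1) first, then the value
+   itself.  */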
+
+/* Return the value of a code used in the .proc pseudo-op that says
+ what kind of result this function returns. For non-C types, we pick
+ the closest C type. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+unsigned long
+sparc_type_code (type)
+ register tree type;
+{
+ register unsigned long qualifiers = 0;
+ register unsigned shift;
+
+ /* Only the first 30 bits of the qualifier are valid. We must refrain from
+ setting more, since some assemblers will give an error for this. Also,
+ we must be careful to avoid shifts of 32 bits or more to avoid getting
+ unpredictable results. */
+
+ for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
+ {
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return qualifiers;
+
+ case ARRAY_TYPE:
+ qualifiers |= (3 << shift);
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ qualifiers |= (2 << shift);
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ qualifiers |= (1 << shift);
+ break;
+
+ case RECORD_TYPE:
+ return (qualifiers | 8);
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ return (qualifiers | 9);
+
+ case ENUMERAL_TYPE:
+ return (qualifiers | 10);
+
+ case VOID_TYPE:
+ return (qualifiers | 16);
+
+ case INTEGER_TYPE:
+ /* If this is a range type, consider it to be the underlying
+ type. */
+ if (TREE_TYPE (type) != 0)
+ break;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. We do this by
+ testing TYPE_PRECISION and TREE_UNSIGNED. The old code used to
+ look at both the names and the above fields, but that's redundant.
+ Any type whose size is between two C types will be considered
+ to be the wider of the two types. Also, we do not have a
+ special code to use for "long long", so anything wider than
+ long is treated the same. Note that we can't distinguish
+ between "int" and "long" in this code if they are the same
+ size, but that's fine, since neither can the assembler. */
+
+ if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 12 : 2));
+
+ else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 13 : 3));
+
+ else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 14 : 4));
+
+ else
+ return (qualifiers | (TREE_UNSIGNED (type) ? 15 : 5));
+
+ case REAL_TYPE:
+ /* If this is a range type, consider it to be the underlying
+ type. */
+ if (TREE_TYPE (type) != 0)
+ break;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. */
+
+ if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
+ return (qualifiers | 6);
+
+ else
+ return (qualifiers | 7);
+
+ case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
+ /* ??? We need to distinguish between double and float complex types,
+ but I don't know how yet because I can't reach this code from
+ existing front-ends. */
+ return (qualifiers | 7); /* Who knows? */
+
+ case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
+ case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
+ case FILE_TYPE: /* GNU Pascal FILE type. */
+ case SET_TYPE: /* GNU Pascal SET type. */
+ case LANG_TYPE: /* ? */
+ return qualifiers;
+
+ default:
+ abort (); /* Not a type! */
+ }
+ }
+
+ return qualifiers;
+}
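+
+/* Worked example: for the C type `unsigned char **' (and assuming
+   CHAR_TYPE_SIZE is 8), sparc_type_code records a pointer code at shift 6
+   and another at shift 8, then reaches the INTEGER_TYPE and returns
+   (1 << 6) | (1 << 8) | 12, i.e. 0x14c.  */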
+
+/* Nested function support. */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function.
+
+ This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
+ (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
+ (to store insns). This is a bit excessive. Perhaps a different
+ mechanism would be better here.
+
+ Emit enough FLUSH insns to synchronize the data and instruction caches. */
+
+void
+sparc_initialize_trampoline (tramp, fnaddr, cxt)
+ rtx tramp, fnaddr, cxt;
+{
+ /* SPARC 32 bit trampoline:
+
+ sethi %hi(fn),%g1
+ sethi %hi(static),%g2
+ jmp %g1+%lo(fn)
+ or %g2,%lo(static),%g2
+
+ SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
+ JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
+ */
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, fnaddr,
+ size_int (10), 0, 1),
+ GEN_INT (0x03000000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, cxt,
+ size_int (10), 0, 1),
+ GEN_INT (0x05000000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ expand_binop (SImode, ior_optab,
+ expand_and (fnaddr, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (0x81c06000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ expand_binop (SImode, ior_optab,
+ expand_and (cxt, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (0x8410a000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
+ /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
+ aligned on a 16 byte boundary so one flush clears it all. */
+ if (sparc_cpu != PROCESSOR_ULTRASPARC)
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
+ plus_constant (tramp, 8)))));
+}
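+
+/* For reference, the constants used above follow the instruction formats
+   shown in the comment at the top of sparc_initialize_trampoline:
+   0x03000000 is a SETHI with rd = %g1 (fnaddr >> 10 fills the imm22
+   field), 0x05000000 is the same with rd = %g2, 0x81c06000 is
+   JMPL %g1+simm13,%g0 with the low 10 bits of fnaddr as the immediate,
+   and 0x8410a000 is OR %g2,simm13,%g2.  */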
+
+/* The 64 bit version is simpler because it makes more sense to load the
+ values as "immediate" data out of the trampoline. It's also easier since
+ we can read the PC without clobbering a register. */
+
+void
+sparc64_initialize_trampoline (tramp, fnaddr, cxt)
+ rtx tramp, fnaddr, cxt;
+{
+ /*
+ rd %pc,%g1
+ ldx [%g1+24],%g5
+ jmp %g5
+ ldx [%g1+16],%g5
+ +16 bytes data
+ */
+
+ emit_move_insn (gen_rtx_MEM (SImode, tramp),
+ GEN_INT (0x83414000));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ GEN_INT (0xca586018));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+                  GEN_INT (0x81c14000));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ GEN_INT (0xca586010));
+ emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
+  emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (DImode, tramp))));
+ if (sparc_cpu != PROCESSOR_ULTRASPARC)
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
+}
+
+/* Subroutines to support a flat (single) register window calling
+ convention. */
+
+/* Single-register window sparc stack frames look like:
+
+ Before call After call
+ +-----------------------+ +-----------------------+
+ high | | | |
+ mem | caller's temps. | | caller's temps. |
+ | | | |
+ +-----------------------+ +-----------------------+
+ | | | |
+ | arguments on stack. | | arguments on stack. |
+ | | | |
+ +-----------------------+FP+92->+-----------------------+
+ | 6 words to save | | 6 words to save |
+ | arguments passed | | arguments passed |
+ | in registers, even | | in registers, even |
+ | if not passed. | | if not passed. |
+ SP+68->+-----------------------+FP+68->+-----------------------+
+ | 1 word struct addr | | 1 word struct addr |
+ +-----------------------+FP+64->+-----------------------+
+ | | | |
+ | 16 word reg save area | | 16 word reg save area |
+ | | | |
+ SP->+-----------------------+ FP->+-----------------------+
+ | 4 word area for |
+ | fp/alu reg moves |
+ FP-16->+-----------------------+
+ | |
+ | local variables |
+ | |
+ +-----------------------+
+ | |
+ | fp register save |
+ | |
+ +-----------------------+
+ | |
+ | gp register save |
+ | |
+ +-----------------------+
+ | |
+ | alloca allocations |
+ | |
+ +-----------------------+
+ | |
+ | arguments on stack |
+ | |
+ SP+92->+-----------------------+
+ | 6 words to save |
+ | arguments passed |
+ | in registers, even |
+ low | if not passed. |
+ memory SP+68->+-----------------------+
+ | 1 word struct addr |
+ SP+64->+-----------------------+
+ | |
+           | 16 word reg save area |
+ | |
+ SP->+-----------------------+ */
+
+/* Structure to be filled in by sparc_flat_compute_frame_size with register
+ save masks, and offsets for the current function. */
+
+struct sparc_frame_info
+{
+ unsigned long total_size; /* # bytes that the entire frame takes up. */
+ unsigned long var_size; /* # bytes that variables take up. */
+ unsigned long args_size; /* # bytes that outgoing arguments take up. */
+ unsigned long extra_size; /* # bytes of extra gunk. */
+ unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
+ unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
+ unsigned long gmask; /* Mask of saved gp registers. */
+ unsigned long fmask; /* Mask of saved fp registers. */
+ unsigned long reg_offset; /* Offset from new sp to store regs. */
+ int initialized; /* Nonzero if frame size already calculated. */
+};
+
+/* Current frame information calculated by sparc_flat_compute_frame_size. */
+struct sparc_frame_info current_frame_info;
+
+/* Zero structure to initialize current_frame_info. */
+struct sparc_frame_info zero_frame_info;
+
+/* Tell prologue and epilogue if register REGNO should be saved / restored. */
+
+#define RETURN_ADDR_REGNUM 15
+#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
+#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
+
+#define MUST_SAVE_REGISTER(regno) \
+ ((regs_ever_live[regno] && !call_used_regs[regno]) \
+ || (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) \
+ || (regno == RETURN_ADDR_REGNUM && regs_ever_live[RETURN_ADDR_REGNUM]))
+
+/* Return the bytes needed to compute the frame pointer from the current
+ stack pointer. */
+
+unsigned long
+sparc_flat_compute_frame_size (size)
+ int size; /* # of var. bytes allocated. */
+{
+ int regno;
+ unsigned long total_size; /* # bytes that the entire frame takes up. */
+ unsigned long var_size; /* # bytes that variables take up. */
+ unsigned long args_size; /* # bytes that outgoing arguments take up. */
+ unsigned long extra_size; /* # extra bytes. */
+ unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
+ unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
+ unsigned long gmask; /* Mask of saved gp registers. */
+ unsigned long fmask; /* Mask of saved fp registers. */
+ unsigned long reg_offset; /* Offset to register save area. */
+ int need_aligned_p; /* 1 if need the save area 8 byte aligned. */
+
+ /* This is the size of the 16 word reg save area, 1 word struct addr
+ area, and 4 word fp/alu register copy area. */
+ extra_size = -STARTING_FRAME_OFFSET + FIRST_PARM_OFFSET(0);
+ var_size = size;
+ /* Also include the size needed for the 6 parameter registers. */
+ args_size = current_function_outgoing_args_size + 24;
+ total_size = var_size + args_size + extra_size;
+ gp_reg_size = 0;
+ fp_reg_size = 0;
+ gmask = 0;
+ fmask = 0;
+ reg_offset = 0;
+ need_aligned_p = 0;
+
+ /* Calculate space needed for gp registers. */
+ for (regno = 1; regno <= 31; regno++)
+ {
+ if (MUST_SAVE_REGISTER (regno))
+ {
+ /* If we need to save two regs in a row, ensure there's room to bump
+ up the address to align it to a doubleword boundary. */
+ if ((regno & 0x1) == 0 && MUST_SAVE_REGISTER (regno+1))
+ {
+ if (gp_reg_size % 8 != 0)
+ gp_reg_size += 4;
+ gp_reg_size += 2 * UNITS_PER_WORD;
+ gmask |= 3 << regno;
+ regno++;
+ need_aligned_p = 1;
+ }
+ else
+ {
+ gp_reg_size += UNITS_PER_WORD;
+ gmask |= 1 << regno;
+ }
+ }
+ }
+
+ /* Calculate space needed for fp registers. */
+ for (regno = 32; regno <= 63; regno++)
+ {
+ if (regs_ever_live[regno] && !call_used_regs[regno])
+ {
+ fp_reg_size += UNITS_PER_WORD;
+ fmask |= 1 << (regno - 32);
+ }
+ }
+
+ if (gmask || fmask)
+ {
+ int n;
+ reg_offset = FIRST_PARM_OFFSET(0) + args_size;
+ /* Ensure save area is 8 byte aligned if we need it. */
+ n = reg_offset % 8;
+ if (need_aligned_p && n != 0)
+ {
+ total_size += 8 - n;
+ reg_offset += 8 - n;
+ }
+ total_size += gp_reg_size + fp_reg_size;
+ }
+
+ /* ??? This looks a little suspicious. Clarify. */
+ if (total_size == extra_size)
+ total_size = extra_size = 0;
+
+ total_size = SPARC_STACK_ALIGN (total_size);
+
+ /* Save other computed information. */
+ current_frame_info.total_size = total_size;
+ current_frame_info.var_size = var_size;
+ current_frame_info.args_size = args_size;
+ current_frame_info.extra_size = extra_size;
+ current_frame_info.gp_reg_size = gp_reg_size;
+ current_frame_info.fp_reg_size = fp_reg_size;
+ current_frame_info.gmask = gmask;
+ current_frame_info.fmask = fmask;
+ current_frame_info.reg_offset = reg_offset;
+ current_frame_info.initialized = reload_completed;
+
+ /* Ok, we're done. */
+ return total_size;
+}
+
+/* Save/restore registers in GMASK and FMASK at register BASE_REG plus offset
+ OFFSET.
+
+ BASE_REG must be 8 byte aligned. This allows us to test OFFSET for
+ appropriate alignment and use DOUBLEWORD_OP when we can. We assume
+ [BASE_REG+OFFSET] will always be a valid address.
+
+ WORD_OP is either "st" for save, "ld" for restore.
+ DOUBLEWORD_OP is either "std" for save, "ldd" for restore. */
+
+void
+sparc_flat_save_restore (file, base_reg, offset, gmask, fmask, word_op,
+ doubleword_op, base_offset)
+ FILE *file;
+ char *base_reg;
+ unsigned int offset;
+ unsigned long gmask;
+ unsigned long fmask;
+ char *word_op;
+ char *doubleword_op;
+ unsigned long base_offset;
+{
+ int regno;
+
+ if (gmask == 0 && fmask == 0)
+ return;
+
+ /* Save registers starting from high to low. We've already saved the
+ previous frame pointer and previous return address for the debugger's
+ sake. The debugger allows us to not need a nop in the epilog if at least
+ one register is reloaded in addition to return address. */
+
+ if (gmask)
+ {
+ for (regno = 1; regno <= 31; regno++)
+ {
+ if ((gmask & (1L << regno)) != 0)
+ {
+ if ((regno & 0x1) == 0 && ((gmask & (1L << (regno+1))) != 0))
+ {
+ /* We can save two registers in a row. If we're not at a
+ double word boundary, move to one.
+ sparc_flat_compute_frame_size ensures there's room to do
+ this. */
+ if (offset % 8 != 0)
+ offset += UNITS_PER_WORD;
+
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ doubleword_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ dwarf2out_reg_save (l, regno, offset + base_offset);
+ dwarf2out_reg_save
+ (l, regno+1, offset+base_offset + UNITS_PER_WORD);
+ }
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ doubleword_op, base_reg, offset,
+ reg_names[regno]);
+
+ offset += 2 * UNITS_PER_WORD;
+ regno++;
+ }
+ else
+ {
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ word_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", regno, offset + base_offset);
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ word_op, base_reg, offset, reg_names[regno]);
+
+ offset += UNITS_PER_WORD;
+ }
+ }
+ }
+ }
+
+ if (fmask)
+ {
+ for (regno = 32; regno <= 63; regno++)
+ {
+ if ((fmask & (1L << (regno - 32))) != 0)
+ {
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ word_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", regno, offset + base_offset);
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ word_op, base_reg, offset, reg_names[regno]);
+
+ offset += UNITS_PER_WORD;
+ }
+ }
+ }
+}
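+
+/* For instance, when sparc_flat_save_restore is called from the prologue
+   with BASE_REG "%sp", WORD_OP "st" and DOUBLEWORD_OP "std", a GMASK
+   covering %l0 and %l1 at an 8 byte aligned offset of, say, 96 produces
+   "std %l0,[%sp+96]"; the matching epilogue call with "ld"/"ldd"
+   produces "ldd [%sp+96],%l0".  */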
+
+/* Set up the stack and frame (if desired) for the function. */
+
+void
+sparc_flat_output_function_prologue (file, size)
+ FILE *file;
+ int size;
+{
+ char *sp_str = reg_names[STACK_POINTER_REGNUM];
+ unsigned long gmask = current_frame_info.gmask;
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
+ fprintf (file, "\t%s# vars= %ld, regs= %d/%d, args= %d, extra= %ld\n",
+ ASM_COMMENT_START,
+ current_frame_info.var_size,
+ current_frame_info.gp_reg_size / 4,
+ current_frame_info.fp_reg_size / 4,
+ current_function_outgoing_args_size,
+ current_frame_info.extra_size);
+
+ size = SPARC_STACK_ALIGN (size);
+ size = (! current_frame_info.initialized
+ ? sparc_flat_compute_frame_size (size)
+ : current_frame_info.total_size);
+
+ /* These cases shouldn't happen. Catch them now. */
+ if (size == 0 && (gmask || current_frame_info.fmask))
+ abort ();
+
+ /* Allocate our stack frame by decrementing %sp.
+ At present, the only algorithm gdb can use to determine if this is a
+ flat frame is if we always set %i7 if we set %sp. This can be optimized
+ in the future by putting in some sort of debugging information that says
+ this is a `flat' function. However, there is still the case of debugging
+ code without such debugging information (including cases where most fns
+ have such info, but there is one that doesn't). So, always do this now
+ so we don't get a lot of code out there that gdb can't handle.
+   If the frame pointer isn't needed then that's ok - gdb won't be able to
+ distinguish us from a non-flat function but there won't (and shouldn't)
+ be any differences anyway. The return pc is saved (if necessary) right
+ after %i7 so gdb won't have to look too far to find it. */
+ if (size > 0)
+ {
+ unsigned int reg_offset = current_frame_info.reg_offset;
+ char *fp_str = reg_names[FRAME_POINTER_REGNUM];
+ char *t1_str = "%g1";
+
+ /* Things get a little tricky if local variables take up more than ~4096
+ bytes and outgoing arguments take up more than ~4096 bytes. When that
+ happens, the register save area can't be accessed from either end of
+ the frame. Handle this by decrementing %sp to the start of the gp
+ register save area, save the regs, update %i7, and then set %sp to its
+ final value. Given that we only have one scratch register to play
+ with it is the cheapest solution, and it helps gdb out as it won't
+ slow down recognition of flat functions.
+ Don't change the order of insns emitted here without checking with
+ the gdb folk first. */
+
+ /* Is the entire register save area offsettable from %sp? */
+ if (reg_offset < 4096 - 64 * UNITS_PER_WORD)
+ {
+ if (size <= 4096)
+ {
+ fprintf (file, "\tadd %s,%d,%s\n",
+ sp_str, -size, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ fp_str, sp_str, reg_offset);
+ fprintf (file, "\tsub %s,%d,%s\t%s# set up frame pointer\n",
+ sp_str, -size, fp_str, ASM_COMMENT_START);
+ reg_offset += 4;
+ }
+ }
+ else
+ {
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size, t1_str, sp_str, t1_str, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ fp_str, sp_str, reg_offset);
+ fprintf (file, "\tadd %s,%s,%s\t%s# set up frame pointer\n",
+ sp_str, t1_str, fp_str, ASM_COMMENT_START);
+ reg_offset += 4;
+ }
+ }
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ reg_offset - 4 - size);
+ dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ }
+ else
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size);
+ }
+ if (gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[RETURN_ADDR_REGNUM], sp_str, reg_offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_return_save ("", reg_offset - size);
+ reg_offset += 4;
+ }
+ sparc_flat_save_restore (file, sp_str, reg_offset,
+ gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "st", "std", -size);
+ }
+ else
+ {
+ /* Subtract %sp in two steps, but make sure there is always a
+ 64 byte register save area, and %sp is properly aligned. */
+ /* Amount to decrement %sp by, the first time. */
+ unsigned int size1 = ((size - reg_offset + 64) + 15) & -16;
+ /* Offset to register save area from %sp. */
+ unsigned int offset = size1 - (size - reg_offset);
+
+ if (size1 <= 4096)
+ {
+ fprintf (file, "\tadd %s,%d,%s\n",
+ sp_str, -size1, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n\tsub %s,%d,%s\t%s# set up frame pointer\n",
+ fp_str, sp_str, offset, sp_str, -size1, fp_str,
+ ASM_COMMENT_START);
+ offset += 4;
+ }
+ }
+ else
+ {
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size1, t1_str, sp_str, t1_str, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n\tadd %s,%s,%s\t%s# set up frame pointer\n",
+ fp_str, sp_str, offset, sp_str, t1_str, fp_str,
+ ASM_COMMENT_START);
+ offset += 4;
+ }
+ }
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ offset - 4 - size1);
+ dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ }
+ else
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size1);
+ }
+ if (gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[RETURN_ADDR_REGNUM], sp_str, offset);
+ if (dwarf2out_do_frame ())
+ /* offset - size1 == reg_offset - size
+ if reg_offset were updated above like offset. */
+ dwarf2out_return_save ("", offset - size1);
+ offset += 4;
+ }
+ sparc_flat_save_restore (file, sp_str, offset,
+ gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "st", "std", -size1);
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size - size1, t1_str, sp_str, t1_str, sp_str);
+ if (dwarf2out_do_frame ())
+ if (! (gmask & FRAME_POINTER_MASK))
+ dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, size);
+ }
+ }
+
+ fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
+}
+
+/* Do any necessary cleanup after a function to restore stack, frame,
+ and regs. */
+
+void
+sparc_flat_output_function_epilogue (file, size)
+ FILE *file;
+ int size;
+{
+ rtx epilogue_delay = current_function_epilogue_delay_list;
+ int noepilogue = FALSE;
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#EPILOGUE#\n", ASM_COMMENT_START);
+
+  /* The epilogue does not depend on any registers other than the stack
+     pointer, so we assume that a single pending nop can be ignored,
+     while two must be filled (2 nops occur for integer multiply and
+     divide).  */
+
+ size = SPARC_STACK_ALIGN (size);
+ size = (!current_frame_info.initialized
+ ? sparc_flat_compute_frame_size (size)
+ : current_frame_info.total_size);
+
+ if (size == 0 && epilogue_delay == 0)
+ {
+ rtx insn = get_last_insn ();
+
+ /* If the last insn was a BARRIER, we don't have to write any code
+ because a jump (aka return) was put there. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == BARRIER)
+ noepilogue = TRUE;
+ }
+
+ if (!noepilogue)
+ {
+ unsigned int reg_offset = current_frame_info.reg_offset;
+ unsigned int size1;
+ char *sp_str = reg_names[STACK_POINTER_REGNUM];
+ char *fp_str = reg_names[FRAME_POINTER_REGNUM];
+ char *t1_str = "%g1";
+
+      /* In the reload sequence, we don't need to fill the load delay
+         slots for most of the loads; also see whether we can fill the
+         final delay slot if it is not otherwise filled by the reload
+         sequence.  */
+
+ if (size > 4095)
+ fprintf (file, "\tset %d,%s\n", size, t1_str);
+
+ if (frame_pointer_needed)
+ {
+ if (size > 4095)
+ fprintf (file,"\tsub %s,%s,%s\t\t%s# sp not trusted here\n",
+ fp_str, t1_str, sp_str, ASM_COMMENT_START);
+ else
+ fprintf (file,"\tsub %s,%d,%s\t\t%s# sp not trusted here\n",
+ fp_str, size, sp_str, ASM_COMMENT_START);
+ }
+
+ /* Is the entire register save area offsettable from %sp? */
+ if (reg_offset < 4096 - 64 * UNITS_PER_WORD)
+ {
+ size1 = 0;
+ }
+ else
+ {
+ /* Restore %sp in two steps, but make sure there is always a
+ 64 byte register save area, and %sp is properly aligned. */
+ /* Amount to increment %sp by, the first time. */
+ size1 = ((reg_offset - 64 - 16) + 15) & -16;
+ /* Offset to register save area from %sp. */
+ reg_offset = size1 - reg_offset;
+
+ fprintf (file, "\tset %d,%s\n\tadd %s,%s,%s\n",
+ size1, t1_str, sp_str, t1_str, sp_str);
+ }
+
+ /* We must restore the frame pointer and return address reg first
+ because they are treated specially by the prologue output code. */
+ if (current_frame_info.gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tld [%s+%d],%s\n",
+ sp_str, reg_offset, fp_str);
+ reg_offset += 4;
+ }
+ if (current_frame_info.gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tld [%s+%d],%s\n",
+ sp_str, reg_offset, reg_names[RETURN_ADDR_REGNUM]);
+ reg_offset += 4;
+ }
+
+ /* Restore any remaining saved registers. */
+ sparc_flat_save_restore (file, sp_str, reg_offset,
+ current_frame_info.gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "ld", "ldd", 0);
+
+      /* If we had to increment %sp in two steps, compute the remainder so
+         that the second adjustment below completes the restoration.  */
+ if (size1 > 0)
+ {
+ size -= size1;
+ if (size > 4095)
+ fprintf (file, "\tset %d,%s\n",
+ size, t1_str);
+ }
+
+ if (current_function_returns_struct)
+ fprintf (file, "\tjmp %%o7+12\n");
+ else
+ fprintf (file, "\tretl\n");
+
+ /* If the only register saved is the return address, we need a
+ nop, unless we have an instruction to put into it. Otherwise
+ we don't since reloading multiple registers doesn't reference
+ the register being loaded. */
+
+ if (epilogue_delay)
+ {
+ if (size)
+ abort ();
+ final_scan_insn (XEXP (epilogue_delay, 0), file, 1, -2, 1);
+ }
+
+ else if (size > 4095)
+ fprintf (file, "\tadd %s,%s,%s\n", sp_str, t1_str, sp_str);
+
+ else if (size > 0)
+ fprintf (file, "\tadd %s,%d,%s\n", sp_str, size, sp_str);
+
+ else
+ fprintf (file, "\tnop\n");
+ }
+
+ /* Reset state info for each function. */
+ current_frame_info = zero_frame_info;
+}
+
+/* Define the number of delay slots needed for the function epilogue.
+
+ On the sparc, we need a slot if either no stack has been allocated,
+ or the only register saved is the return register. */
+
+int
+sparc_flat_epilogue_delay_slots ()
+{
+ if (!current_frame_info.initialized)
+ (void) sparc_flat_compute_frame_size (get_frame_size ());
+
+ if (current_frame_info.total_size == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Return true if TRIAL is a valid insn for the epilogue delay slot.
+ Any single length instruction which doesn't reference the stack or frame
+ pointer is OK. */
+
+int
+sparc_flat_eligible_for_epilogue_delay (trial, slot)
+ rtx trial;
+ int slot ATTRIBUTE_UNUSED;
+{
+ rtx pat = PATTERN (trial);
+
+ if (get_attr_length (trial) != 1)
+ return 0;
+
+ /* If %g0 is live, there are lots of things we can't handle.
+ Rather than trying to find them all now, let's punt and only
+ optimize things as necessary. */
+ if (TARGET_LIVE_G0)
+ return 0;
+
+ if (! reg_mentioned_p (stack_pointer_rtx, pat)
+ && ! reg_mentioned_p (frame_pointer_rtx, pat))
+ return 1;
+
+ return 0;
+}
+
+/* Adjust the cost of a scheduling dependency. Return the new cost of
+ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
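+
+/* As an illustration: if an add sets %o1 and the very next insn is
+   "ld [%o1+4],%o2", the true dependence is on the load address, so
+   supersparc_adjust_cost below charges the load an extra 3 cycles;
+   a store whose only dependence is on the data being stored keeps its
+   original cost.  */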
+
+int
+supersparc_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ enum attr_type insn_type;
+
+ if (! recog_memoized (insn))
+ return 0;
+
+ insn_type = get_attr_type (insn);
+
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ /* Data dependency; DEP_INSN writes a register that INSN reads some
+ cycles later. */
+
+ /* if a load, then the dependence must be on the memory address;
+ add an extra 'cycle'. Note that the cost could be two cycles
+ if the reg was written late in an instruction group; we can't tell
+ here. */
+ if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
+ return cost + 3;
+
+ /* Get the delay only if the address of the store is the dependence. */
+ if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
+ {
+ rtx pat = PATTERN(insn);
+ rtx dep_pat = PATTERN (dep_insn);
+
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+ return cost; /* This shouldn't happen! */
+
+ /* The dependency between the two instructions was on the data that
+ is being stored. Assume that this implies that the address of the
+ store is not dependent. */
+ if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+ return cost;
+
+ return cost + 3; /* An approximation. */
+ }
+
+ /* A shift instruction cannot receive its data from an instruction
+ in the same cycle; add a one cycle penalty. */
+ if (insn_type == TYPE_SHIFT)
+ return cost + 3; /* Split before cascade into shift. */
+ }
+ else
+ {
+ /* Anti- or output- dependency; DEP_INSN reads/writes a register that
+ INSN writes some cycles later. */
+
+ /* These are only significant for the fpu unit; writing a fp reg before
+ the fpu has finished with it stalls the processor. */
+
+ /* Reusing an integer register causes no problems. */
+ if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
+ return 0;
+ }
+
+ return cost;
+}
+
+int
+ultrasparc_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ enum attr_type insn_type, dep_type;
+ rtx pat = PATTERN(insn);
+ rtx dep_pat = PATTERN (dep_insn);
+
+ if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+ return cost;
+
+ insn_type = get_attr_type (insn);
+ dep_type = get_attr_type (dep_insn);
+
+#define SLOW_FP(dep_type) \
+(dep_type == TYPE_FPSQRT || dep_type == TYPE_FPDIVS || dep_type == TYPE_FPDIVD)
+
+ switch (REG_NOTE_KIND (link))
+ {
+ case 0:
+ /* Data dependency; DEP_INSN writes a register that INSN reads some
+ cycles later. */
+
+ switch (insn_type)
+ {
+ /* UltraSPARC can dual issue a store and an instruction setting
+ the value stored, except for divide and square root. */
+ case TYPE_FPSTORE:
+ if (! SLOW_FP (dep_type))
+ return 0;
+ return cost;
+
+ case TYPE_STORE:
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+ return cost;
+
+ if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+ /* The dependency between the two instructions is on the data
+ that is being stored. Assume that the address of the store
+ is not also dependent. */
+ return 0;
+ return cost;
+
+ case TYPE_LOAD:
+ case TYPE_SLOAD:
+ case TYPE_FPLOAD:
+ /* A load does not return data until at least 11 cycles after
+ a store to the same location. 3 cycles are accounted for
+ in the load latency; add the other 8 here. */
+ if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
+ {
+ /* If the addresses are not equal this may be a false
+ dependency because pointer aliasing could not be
+ determined. Add only 2 cycles in that case. 2 is
+ an arbitrary compromise between 8, which would cause
+ the scheduler to generate worse code elsewhere to
+ compensate for a dependency which might not really
+ exist, and 0. */
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
+ || GET_CODE (SET_SRC (pat)) != MEM
+ || GET_CODE (SET_DEST (dep_pat)) != MEM
+ || ! rtx_equal_p (XEXP (SET_SRC (pat), 0),
+ XEXP (SET_DEST (dep_pat), 0)))
+ return cost + 2;
+
+ return cost + 8;
+ }
+ return cost;
+
+ case TYPE_BRANCH:
+ /* Compare to branch latency is 0. There is no benefit from
+ separating compare and branch. */
+ if (dep_type == TYPE_COMPARE)
+ return 0;
+ /* Floating point compare to branch latency is less than
+ compare to conditional move. */
+ if (dep_type == TYPE_FPCMP)
+ return cost - 1;
+ return cost;
+
+ case TYPE_FPCMOVE:
+ /* FMOVR class instructions can not issue in the same cycle
+ or the cycle after an instruction which writes any
+ integer register. Model this as cost 2 for dependent
+ instructions. */
+ if ((dep_type == TYPE_IALU || dep_type == TYPE_UNARY
+ || dep_type == TYPE_BINARY)
+ && cost < 2)
+ return 2;
+ /* Otherwise check as for integer conditional moves. */
+
+ case TYPE_CMOVE:
+ /* Conditional moves involving integer registers wait until
+ 3 cycles after loads return data. The interlock applies
+ to all loads, not just dependent loads, but that is hard
+ to model. */
+ if (dep_type == TYPE_LOAD || dep_type == TYPE_SLOAD)
+ return cost + 3;
+ return cost;
+
+ default:
+ break;
+ }
+ break;
+
+ case REG_DEP_ANTI:
+ /* Divide and square root lock destination registers for full latency. */
+ if (! SLOW_FP (dep_type))
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Other costs not accounted for:
+ - Multiply should be modeled as having no latency because there is
+ nothing the scheduler can do about it.
+ - Single precision floating point loads lock the other half of
+ the even/odd register pair.
+ - Several hazards associated with ldd/std are ignored because these
+ instructions are rarely generated for V9.
+ - A shift following an integer instruction which does not set the
+ condition codes can not issue in the same cycle.
+ - The floating point pipeline can not have both a single and double
+ precision operation active at the same time. Format conversions
+ and graphics instructions are given honorary double precision status.
+ - call and jmpl are always the first instruction in a group. */
+
+ return cost;
+}
+
+int
+sparc_issue_rate ()
+{
+ switch (sparc_cpu)
+ {
+ default:
+ return 1;
+ case PROCESSOR_V9:
+ /* Assume V9 processors are capable of at least dual-issue. */
+ return 2;
+ case PROCESSOR_SUPERSPARC:
+ return 3;
+ case PROCESSOR_ULTRASPARC:
+ return 4;
+ }
+}
+
+static int
+set_extends(x, insn)
+ rtx x, insn;
+{
+ register rtx pat = PATTERN (insn);
+
+ switch (GET_CODE (SET_SRC (pat)))
+ {
+ /* Load and some shift instructions zero extend. */
+ case MEM:
+ case ZERO_EXTEND:
+ /* sethi clears the high bits */
+ case HIGH:
+ /* LO_SUM is used with sethi. sethi cleared the high
+ bits and the values used with lo_sum are positive */
+ case LO_SUM:
+ /* UNSPEC is v8plus_clear_high */
+ case UNSPEC:
+ /* Store flag stores 0 or 1 */
+ case LT: case LTU:
+ case GT: case GTU:
+ case LE: case LEU:
+ case GE: case GEU:
+ case EQ:
+ case NE:
+ return 1;
+ case AND:
+ {
+ rtx op1 = XEXP (SET_SRC (pat), 1);
+ if (GET_CODE (op1) == CONST_INT)
+ return INTVAL (op1) >= 0;
+ if (GET_CODE (XEXP (SET_SRC (pat), 0)) == REG
+ && sparc_check_64 (XEXP (SET_SRC (pat), 0), insn) == 1)
+ return 1;
+ if (GET_CODE (op1) == REG
+ && sparc_check_64 ((op1), insn) == 1)
+ return 1;
+ }
+ case ASHIFT:
+ case LSHIFTRT:
+ return GET_MODE (SET_SRC (pat)) == SImode;
+ /* Positive integers leave the high bits zero. */
+ case CONST_DOUBLE:
+ return ! (CONST_DOUBLE_LOW (x) & 0x80000000);
+ case CONST_INT:
+ return ! (INTVAL (x) & 0x80000000);
+ case ASHIFTRT:
+ case SIGN_EXTEND:
+ return - (GET_MODE (SET_SRC (pat)) == SImode);
+ default:
+ return 0;
+ }
+}
+
+/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
+ unknown. Return 1 if the high bits are zero, -1 if the register is
+ sign extended. */
+int
+sparc_check_64 (x, insn)
+ rtx x, insn;
+{
+ /* If a register is set only once it is safe to ignore insns this
+ code does not know how to handle. The loop will either recognize
+ the single set and return the correct value or fail to recognize
+ it and return 0. */
+ int set_once = 0;
+
+ if (GET_CODE (x) == REG
+ && flag_expensive_optimizations
+ && REG_N_SETS (REGNO (x)) == 1)
+ set_once = 1;
+
+ if (insn == 0)
+ {
+ if (set_once)
+ insn = get_last_insn_anywhere ();
+ else
+ return 0;
+ }
+
+ while ((insn = PREV_INSN (insn)))
+ {
+ switch (GET_CODE (insn))
+ {
+ case JUMP_INSN:
+ case NOTE:
+ break;
+ case CODE_LABEL:
+ case CALL_INSN:
+ default:
+ if (! set_once)
+ return 0;
+ break;
+ case INSN:
+ {
+ rtx pat = PATTERN (insn);
+ if (GET_CODE (pat) != SET)
+ return 0;
+ if (rtx_equal_p (x, SET_DEST (pat)))
+ return set_extends (x, insn);
+ if (reg_overlap_mentioned_p (SET_DEST (pat), x))
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+char *
+sparc_v8plus_shift (operands, insn, opcode)
+ rtx *operands;
+ rtx insn;
+ char *opcode;
+{
+ static char asm_code[60];
+
+ if (GET_CODE (operands[3]) == SCRATCH)
+ operands[3] = operands[0];
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ output_asm_insn ("mov %1,%3", operands);
+ }
+ else
+ {
+ output_asm_insn ("sllx %H1,32,%3", operands);
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn ("srl %L1,0,%L1", operands);
+ output_asm_insn ("or %L1,%3,%3", operands);
+ }
+
+ strcpy(asm_code, opcode);
+ if (which_alternative != 2)
+ return strcat (asm_code, " %0,%2,%L0\n\tsrlx %L0,32,%H0");
+ else
+ return strcat (asm_code, " %3,%2,%3\n\tsrlx %3,32,%H0\n\tmov %3,%L0");
+}
+
+
+/* Return 1 if DEST and SRC reference only global and `in' registers.  */
+
+int
+sparc_return_peephole_ok (dest, src)
+ rtx dest, src;
+{
+ if (! TARGET_V9)
+ return 0;
+ if (leaf_function)
+ return 0;
+ if (GET_CODE (src) != CONST_INT
+ && (GET_CODE (src) != REG || ! IN_OR_GLOBAL_P (src)))
+ return 0;
+ return IN_OR_GLOBAL_P (dest);
+}
diff --git a/contrib/gcc/config/sparc/sparc.h b/contrib/gcc/config/sparc/sparc.h
new file mode 100644
index 0000000..e66f5e6
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.h
@@ -0,0 +1,3287 @@
+/* Definitions of target machine for GNU compiler, for Sun SPARC.
+ Copyright (C) 1987, 88, 89, 92, 94-97, 1998 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+ 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+ at Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Note that some other tm.h files include this one and then override
+ whatever definitions are necessary. */
+
+/* Specify this in a cover file to provide bi-architecture (32/64) support. */
+/* #define SPARC_BI_ARCH */
+
+/* Macro used later in this file to determine default architecture. */
+#define DEFAULT_ARCH32_P ((TARGET_DEFAULT & MASK_64BIT) == 0)
+
+/* TARGET_ARCH{32,64} are the main macros to decide which of the two
+ architectures to compile for. We allow targets to choose compile time or
+ runtime selection. */
+#ifdef SPARC_BI_ARCH
+#define TARGET_ARCH32 (! TARGET_64BIT)
+#else
+#define TARGET_ARCH32 (DEFAULT_ARCH32_P)
+#endif
+#define TARGET_ARCH64 (! TARGET_ARCH32)
+
+/* Code model selection.
+ -mcmodel is used to select the v9 code model.
+ Different code models aren't supported for v8 code.
+
+ TARGET_CM_32: 32 bit address space, top 32 bits = 0,
+ pointers are 32 bits. Note that this isn't intended
+ to imply a v8 abi.
+
+ TARGET_CM_MEDLOW: 32 bit address space, top 32 bits = 0,
+ avoid generating %uhi and %ulo terms,
+ pointers are 64 bits.
+
+ TARGET_CM_MEDMID: 64 bit address space.
+ The executable must be in the low 16 TB of memory.
+ This corresponds to the low 44 bits, and the %[hml]44
+ relocs are used.
+
+ TARGET_CM_MEDANY: 64 bit address space.
+ The text and data segments have a maximum size of 31
+ bits and may be located anywhere. The maximum offset
+ from any instruction to the label _GLOBAL_OFFSET_TABLE_
+ is 31 bits.
+
+ TARGET_CM_EMBMEDANY: 64 bit address space.
+ The text and data segments have a maximum size of 31 bits
+ and may be located anywhere. Register %g4 contains
+ the start address of the data segment.
+*/
+
+enum cmodel {
+ CM_32,
+ CM_MEDLOW,
+ CM_MEDMID,
+ CM_MEDANY,
+ CM_EMBMEDANY
+};
+
+/* Value of -mcmodel specified by user. */
+extern char *sparc_cmodel_string;
+/* One of CM_FOO. */
+extern enum cmodel sparc_cmodel;
+
+/* V9 code model selection. */
+#define TARGET_CM_MEDLOW (sparc_cmodel == CM_MEDLOW)
+#define TARGET_CM_MEDMID (sparc_cmodel == CM_MEDMID)
+#define TARGET_CM_MEDANY (sparc_cmodel == CM_MEDANY)
+#define TARGET_CM_EMBMEDANY (sparc_cmodel == CM_EMBMEDANY)
+
+#define SPARC_DEFAULT_CMODEL CM_MEDLOW
+
+/* This is call-clobbered in the normal ABI, but is reserved in the
+ home grown (aka upward compatible) embedded ABI. */
+#define EMBMEDANY_BASE_REG "%g4"
+
+/* Values of TARGET_CPU_DEFAULT, set via -D in the Makefile,
+ and specified by the user via --with-cpu=foo.
+ This specifies the cpu implementation, not the architecture size. */
+#define TARGET_CPU_sparc 0
+#define TARGET_CPU_v7 0 /* alias for previous */
+#define TARGET_CPU_sparclet 1
+#define TARGET_CPU_sparclite 2
+#define TARGET_CPU_v8 3 /* generic v8 implementation */
+#define TARGET_CPU_supersparc 4
+#define TARGET_CPU_v9 5 /* generic v9 implementation */
+#define TARGET_CPU_sparc64 5 /* alias */
+#define TARGET_CPU_ultrasparc 6
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparc || TARGET_CPU_DEFAULT == TARGET_CPU_v8 || TARGET_CPU_DEFAULT == TARGET_CPU_supersparc
+#define CPP_CPU_DEFAULT_SPEC ""
+#define ASM_CPU_DEFAULT_SPEC ""
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclet
+#define CPP_CPU_DEFAULT_SPEC "-D__sparclet__"
+#define ASM_CPU_DEFAULT_SPEC "-Asparclet"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclite
+#define CPP_CPU_DEFAULT_SPEC "-D__sparclite__"
+#define ASM_CPU_DEFAULT_SPEC "-Asparclite"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9
+/* ??? What does Sun's CC pass? */
+#define CPP_CPU_DEFAULT_SPEC "-D__sparc_v9__"
+/* ??? It's not clear how other assemblers will handle this, so by default
+ use GAS. Sun's Solaris assembler recognizes -xarch=v8plus, but this case
+ is handled in sol2.h. */
+#define ASM_CPU_DEFAULT_SPEC "-Av9"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+#define CPP_CPU_DEFAULT_SPEC "-D__sparc_v9__"
+#define ASM_CPU_DEFAULT_SPEC "-Av9a"
+#endif
+#ifndef CPP_CPU_DEFAULT_SPEC
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+
+/* Names to predefine in the preprocessor for this target machine.
+ ??? It would be nice to not include any subtarget specific values here,
+ however there's no way to portably provide subtarget values to
+ CPP_PREFINES. Also, -D values in CPP_SUBTARGET_SPEC don't get turned into
+ foo, __foo and __foo__. */
+
+#define CPP_PREDEFINES "-Dsparc -Dsun -Dunix -Asystem(unix) -Asystem(bsd)"
+
+/* Define macros to distinguish architectures. */
+
+/* Common CPP definitions used by CPP_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#define CPP_CPU_SPEC "\
+%{mcypress:} \
+%{msparclite:-D__sparclite__} \
+%{mf930:-D__sparclite__} %{mf934:-D__sparclite__} \
+%{mv8:-D__sparc_v8__} \
+%{msupersparc:-D__supersparc__ -D__sparc_v8__} \
+%{mcpu=sparclet:-D__sparclet__} %{mcpu=tsc701:-D__sparclet__} \
+%{mcpu=sparclite:-D__sparclite__} \
+%{mcpu=f930:-D__sparclite__} %{mcpu=f934:-D__sparclite__} \
+%{mcpu=v8:-D__sparc_v8__} \
+%{mcpu=supersparc:-D__supersparc__ -D__sparc_v8__} \
+%{mcpu=v9:-D__sparc_v9__} \
+%{mcpu=ultrasparc:-D__sparc_v9__} \
+%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(cpp_cpu_default)}}}}}}} \
+"
+
+/* ??? The GCC_NEW_VARARGS macro is now obsolete, because gcc always uses
+   the right varargs.h file when bootstrapping.  */
+/* ??? It's not clear what value we want to use for -Acpu/machine for
+ sparc64 in 32 bit environments, so for now we only use `sparc64' in
+ 64 bit environments. */
+
+#define CPP_ARCH32_SPEC "-D__GCC_NEW_VARARGS__ -Acpu(sparc) -Amachine(sparc)"
+#define CPP_ARCH64_SPEC "-D__arch64__ -Acpu(sparc64) -Amachine(sparc64)"
+#define CPP_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? CPP_ARCH32_SPEC : CPP_ARCH64_SPEC)
+
+#define CPP_ARCH_SPEC "\
+%{m32:%(cpp_arch32)} \
+%{m64:%(cpp_arch64)} \
+%{!m32:%{!m64:%(cpp_arch_default)}} \
+"
+
+/* Macros to distinguish endianness. */
+#define CPP_ENDIAN_SPEC "%{mlittle-endian:-D__LITTLE_ENDIAN__}"
+
+/* Macros to distinguish the particular subtarget. */
+#define CPP_SUBTARGET_SPEC ""
+
+#define CPP_SPEC "%(cpp_cpu) %(cpp_arch) %(cpp_endian) %(cpp_subtarget)"
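+
+/* For example, invoking the driver with "-m64 -mcpu=ultrasparc" makes
+   CPP_SPEC contribute -D__sparc_v9__ (from cpp_cpu) together with
+   -D__arch64__ -Acpu(sparc64) -Amachine(sparc64) (from cpp_arch64);
+   when neither -m32 nor -m64 is given, the cpp_arch_default spec
+   decides.  */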
+
+/* Prevent error on `-sun4' and `-target sun4' options. */
+/* This used to translate -dalign to -malign, but that is no good
+ because it can't turn off the usual meaning of making debugging dumps. */
+/* Translate old style -m<cpu> into new style -mcpu=<cpu>.
+ ??? Delete support for -m<cpu> for 2.9. */
+
+#define CC1_SPEC "\
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+"
+
+/* Override in target specific files. */
+#define ASM_CPU_SPEC "\
+%{mcpu=sparclet:-Asparclet} %{mcpu=tsc701:-Asparclet} \
+%{msparclite:-Asparclite} \
+%{mf930:-Asparclite} %{mf934:-Asparclite} \
+%{mcpu=sparclite:-Asparclite} \
+%{mcpu=f930:-Asparclite} %{mcpu=f934:-Asparclite} \
+%{mv8plus:-Av8plus} \
+%{mcpu=v9:-Av9} \
+%{mcpu=ultrasparc:%{!mv8plus:-Av9a}} \
+%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(asm_cpu_default)}}}}}}} \
+"
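+
+/* For example, with the spec above "-mcpu=ultrasparc" alone passes -Av9a
+   to the assembler, while "-mcpu=ultrasparc -mv8plus" passes -Av8plus
+   instead (the inner %{!mv8plus:...} suppresses -Av9a); with no cpu
+   selection option at all, the asm_cpu_default spec is used.  */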
+
+/* Word size selection, among other things.
+ This is what GAS uses. Add %(asm_arch) to ASM_SPEC to enable. */
+
+#define ASM_ARCH32_SPEC "-32"
+#define ASM_ARCH64_SPEC "-64"
+#define ASM_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? ASM_ARCH32_SPEC : ASM_ARCH64_SPEC)
+
+#define ASM_ARCH_SPEC "\
+%{m32:%(asm_arch32)} \
+%{m64:%(asm_arch64)} \
+%{!m32:%{!m64:%(asm_arch_default)}} \
+"
+
+/* Special flags to the Sun-4 assembler when using pipe for input. */
+
+#define ASM_SPEC "\
+%| %{R} %{!pg:%{!p:%{fpic:-k} %{fPIC:-k}}} %{keep-local-as-symbols:-L} \
+%(asm_cpu) \
+"
+
+#define LIB_SPEC "%{!shared:%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p} %{g:-lg}}"
+
+/* Provide required defaults for linker -e and -d switches. */
+
+#define LINK_SPEC \
+ "%{!shared:%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp} %{static:-Bstatic} \
+ %{assert*} %{shared:%{!mimpure-text:-assert pure-text}}"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC driver
+ program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define EXTRA_SPECS \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC }, \
+ { "cpp_arch32", CPP_ARCH32_SPEC }, \
+ { "cpp_arch64", CPP_ARCH64_SPEC }, \
+ { "cpp_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_arch", CPP_ARCH_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_subtarget", CPP_SUBTARGET_SPEC }, \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "asm_cpu_default", ASM_CPU_DEFAULT_SPEC }, \
+ { "asm_arch32", ASM_ARCH32_SPEC }, \
+ { "asm_arch64", ASM_ARCH64_SPEC }, \
+ { "asm_arch_default", ASM_ARCH_DEFAULT_SPEC }, \
+ { "asm_arch", ASM_ARCH_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+
+#ifdef SPARC_BI_ARCH
+#define NO_BUILTIN_PTRDIFF_TYPE
+#define NO_BUILTIN_SIZE_TYPE
+#endif
+#define PTRDIFF_TYPE (TARGET_ARCH64 ? "long int" : "int")
+#define SIZE_TYPE (TARGET_ARCH64 ? "long unsigned int" : "unsigned int")
+
+/* ??? This should be 32 bits for v9 but what can we do? */
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+#define MAX_WCHAR_TYPE_SIZE 16
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* To make profiling work with -f{pic,PIC}, we need to emit the profiling
+ code into the rtl. Also, if we are profiling, we cannot eliminate
+ the frame pointer (because the return address will get smashed). */
+
+void sparc_override_options ();
+
+#define OVERRIDE_OPTIONS \
+ do { \
+ if (profile_flag || profile_block_flag || profile_arc_flag) \
+ { \
+ if (flag_pic) \
+ { \
+ char *pic_string = (flag_pic == 1) ? "-fpic" : "-fPIC"; \
+ warning ("%s and profiling conflict: disabling %s", \
+ pic_string, pic_string); \
+ flag_pic = 0; \
+ } \
+ flag_omit_frame_pointer = 0; \
+ } \
+ sparc_override_options (); \
+ SUBTARGET_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* This is meant to be redefined in the host dependent files. */
+#define SUBTARGET_OVERRIDE_OPTIONS
+
+/* These compiler options take an argument. We ignore -target for now. */
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp (STR, "target") || !strcmp (STR, "assert"))
+
+/* Print subsidiary information on the compiler version in use. */
+
+#define TARGET_VERSION fprintf (stderr, " (sparc)");
+
+/* Generate DBX debugging information. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* Nonzero if we should generate code to use the fpu. */
+#define MASK_FPU 1
+#define TARGET_FPU (target_flags & MASK_FPU)
+
+/* Nonzero if we should use FUNCTION_EPILOGUE. Otherwise, we
+ use fast return insns, but lose some generality. */
+#define MASK_EPILOGUE 2
+#define TARGET_EPILOGUE (target_flags & MASK_EPILOGUE)
+
+/* Nonzero if we should assume that double pointers might be unaligned.
+ This can happen when linking gcc compiled code with other compilers,
+ because the ABI only guarantees 4 byte alignment. */
+#define MASK_UNALIGNED_DOUBLES 4
+#define TARGET_UNALIGNED_DOUBLES (target_flags & MASK_UNALIGNED_DOUBLES)
+
+/* Nonzero means that we should generate code for a v8 sparc. */
+#define MASK_V8 0x8
+#define TARGET_V8 (target_flags & MASK_V8)
+
+/* Nonzero means that we should generate code for a sparclite.
+ This enables the sparclite specific instructions, but does not affect
+ whether FPU instructions are emitted. */
+#define MASK_SPARCLITE 0x10
+#define TARGET_SPARCLITE (target_flags & MASK_SPARCLITE)
+
+/* Nonzero if we're compiling for the sparclet. */
+#define MASK_SPARCLET 0x20
+#define TARGET_SPARCLET (target_flags & MASK_SPARCLET)
+
+/* Nonzero if we're compiling for v9 sparc.
+   Note that v9 processors can run in 32 bit mode, so this doesn't
+   necessarily mean the word size is 64. */
+#define MASK_V9 0x40
+#define TARGET_V9 (target_flags & MASK_V9)
+
+/* Non-zero to generate code that uses the instructions deprecated in
+ the v9 architecture. This option only applies to v9 systems. */
+/* ??? This isn't user selectable yet. It's used to enable such insns
+ on 32 bit v9 systems and for the moment they're permanently disabled
+ on 64 bit v9 systems. */
+#define MASK_DEPRECATED_V8_INSNS 0x80
+#define TARGET_DEPRECATED_V8_INSNS (target_flags & MASK_DEPRECATED_V8_INSNS)
+
+/* Mask of all CPU selection flags. */
+#define MASK_ISA \
+(MASK_V8 + MASK_SPARCLITE + MASK_SPARCLET + MASK_V9 + MASK_DEPRECATED_V8_INSNS)
+
+/* Non-zero means don't pass `-assert pure-text' to the linker. */
+#define MASK_IMPURE_TEXT 0x100
+#define TARGET_IMPURE_TEXT (target_flags & MASK_IMPURE_TEXT)
+
+/* Nonzero means that we should generate code using a flat register window
+ model, i.e. no save/restore instructions are generated, which is
+ compatible with normal sparc code.
+ The frame pointer is %i7 instead of %fp. */
+#define MASK_FLAT 0x200
+#define TARGET_FLAT (target_flags & MASK_FLAT)
+
+/* Nonzero means use the registers that the Sparc ABI reserves for
+ application software. This must be the default to coincide with the
+ setting in FIXED_REGISTERS. */
+#define MASK_APP_REGS 0x400
+#define TARGET_APP_REGS (target_flags & MASK_APP_REGS)
+
+/* Option to select how quad word floating point is implemented.
+ When TARGET_HARD_QUAD is true, we use the hardware quad instructions.
+ Otherwise, we use the SPARC ABI quad library functions. */
+#define MASK_HARD_QUAD 0x800
+#define TARGET_HARD_QUAD (target_flags & MASK_HARD_QUAD)
+
+/* Non-zero on little-endian machines. */
+/* ??? Little endian support currently only exists for sparclet-aout and
+ sparc64-elf configurations. May eventually want to expand the support
+ to all targets, but for now it's kept local to only those two. */
+#define MASK_LITTLE_ENDIAN 0x1000
+#define TARGET_LITTLE_ENDIAN (target_flags & MASK_LITTLE_ENDIAN)
+
+/* 0x2000, 0x4000 are unused */
+
+/* Nonzero if pointers are 64 bits.
+ This is not a user selectable option, though it may be one day -
+ so it is used to determine pointer size instead of an architecture flag. */
+#define MASK_PTR64 0x8000
+#define TARGET_PTR64 (target_flags & MASK_PTR64)
+
+/* Nonzero if generating code to run in a 64 bit environment.
+ This is intended to only be used by TARGET_ARCH{32,64} as they are the
+ mechanism used to control compile time or run time selection. */
+#define MASK_64BIT 0x10000
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+
+/* 0x20000, 0x40000 are unused */
+
+/* Non-zero means use a stack bias of 2047. Stack offsets are obtained by
+ adding 2047 to %sp. This option is for v9 only and is the default. */
+#define MASK_STACK_BIAS 0x80000
+#define TARGET_STACK_BIAS (target_flags & MASK_STACK_BIAS)
+
+/* Non-zero means %g0 is a normal register.
+ We still clobber it as necessary, but we can't rely on it always having
+ a zero value.
+ We don't bother to support this in true 64 bit mode. */
+#define MASK_LIVE_G0 0x100000
+#define TARGET_LIVE_G0 (target_flags & MASK_LIVE_G0)
+
+/* Non-zero means the cpu has broken `save' and `restore' insns, only
+ the trivial versions work (save %g0,%g0,%g0; restore %g0,%g0,%g0).
+ We assume the environment will properly handle or otherwise avoid
+ trouble associated with an interrupt occurring after the `save' or trap
+ occurring during it. */
+#define MASK_BROKEN_SAVERESTORE 0x200000
+#define TARGET_BROKEN_SAVERESTORE (target_flags & MASK_BROKEN_SAVERESTORE)
+
+/* Non-zero means -m{,no-}fpu was passed on the command line. */
+#define MASK_FPU_SET 0x400000
+#define TARGET_FPU_SET (target_flags & MASK_FPU_SET)
+
+/* Use the UltraSPARC Visual Instruction Set extensions. */
+#define MASK_VIS 0x1000000
+#define TARGET_VIS (target_flags & MASK_VIS)
+
+/* Compile for Solaris V8+. 32 bit Solaris preserves the high bits of
+ the current out and global registers. Linux saves the high bits on
+ context switches but not signals. */
+#define MASK_V8PLUS 0x2000000
+#define TARGET_V8PLUS (target_flags & MASK_V8PLUS)
+
+/* TARGET_HARD_MUL: Use hardware multiply instructions but not %y.
+ TARGET_HARD_MUL32: Use hardware multiply instructions with rd %y
+ to get high 32 bits. False in V8+ or V9 because multiply stores
+ a 64 bit result in a register. */
+
+#define TARGET_HARD_MUL32 \
+ ((TARGET_V8 || TARGET_SPARCLITE \
+ || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS) \
+ && ! TARGET_V8PLUS)
+
+#define TARGET_HARD_MUL \
+ (TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET \
+ || TARGET_DEPRECATED_V8_INSNS || TARGET_V8PLUS)
+
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { {"fpu", MASK_FPU | MASK_FPU_SET}, \
+ {"no-fpu", -MASK_FPU}, \
+ {"no-fpu", MASK_FPU_SET}, \
+ {"hard-float", MASK_FPU | MASK_FPU_SET}, \
+ {"soft-float", -MASK_FPU}, \
+ {"soft-float", MASK_FPU_SET}, \
+ {"epilogue", MASK_EPILOGUE}, \
+ {"no-epilogue", -MASK_EPILOGUE}, \
+ {"unaligned-doubles", MASK_UNALIGNED_DOUBLES}, \
+ {"no-unaligned-doubles", -MASK_UNALIGNED_DOUBLES}, \
+ {"impure-text", MASK_IMPURE_TEXT}, \
+ {"no-impure-text", -MASK_IMPURE_TEXT}, \
+ {"flat", MASK_FLAT}, \
+ {"no-flat", -MASK_FLAT}, \
+ {"app-regs", MASK_APP_REGS}, \
+ {"no-app-regs", -MASK_APP_REGS}, \
+ {"hard-quad-float", MASK_HARD_QUAD}, \
+ {"soft-quad-float", -MASK_HARD_QUAD}, \
+ {"v8plus", MASK_V8PLUS}, \
+ {"no-v8plus", -MASK_V8PLUS}, \
+ {"vis", MASK_VIS}, \
+ /* ??? These are deprecated, coerced to -mcpu=. Delete in 2.9. */ \
+ {"cypress", 0}, \
+ {"sparclite", 0}, \
+ {"f930", 0}, \
+ {"f934", 0}, \
+ {"v8", 0}, \
+ {"supersparc", 0}, \
+ /* End of deprecated options. */ \
+ /* -mptrNN exists for *experimental* purposes. */ \
+/* {"ptr64", MASK_PTR64}, */ \
+/* {"ptr32", -MASK_PTR64}, */ \
+ {"32", -MASK_64BIT}, \
+ {"64", MASK_64BIT}, \
+ {"stack-bias", MASK_STACK_BIAS}, \
+ {"no-stack-bias", -MASK_STACK_BIAS}, \
+ SUBTARGET_SWITCHES \
+ { "", TARGET_DEFAULT}}
+
+/* MASK_APP_REGS must always be the default because that's what
+ FIXED_REGISTERS is set to and -ffixed- is processed before
+ CONDITIONAL_REGISTER_USAGE is called (where we process -mno-app-regs). */
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU)
+
+/* This is meant to be redefined in target specific files. */
+#define SUBTARGET_SWITCHES
+
+/* Processor type.
+ These must match the values for the cpu attribute in sparc.md. */
+enum processor_type {
+ PROCESSOR_V7,
+ PROCESSOR_CYPRESS,
+ PROCESSOR_V8,
+ PROCESSOR_SUPERSPARC,
+ PROCESSOR_SPARCLITE,
+ PROCESSOR_F930,
+ PROCESSOR_F934,
+ PROCESSOR_SPARCLET,
+ PROCESSOR_TSC701,
+ PROCESSOR_V9,
+ PROCESSOR_ULTRASPARC
+};
+
+/* This is set from -m{cpu,tune}=xxx. */
+extern enum processor_type sparc_cpu;
+
+/* Recast the cpu class to be the cpu attribute.
+ Every file includes us, but not every file includes insn-attr.h. */
+#define sparc_cpu_attr ((enum attr_cpu) sparc_cpu)
+
+/* This macro is similar to `TARGET_SWITCHES' but defines names of
+ command options that have values. Its definition is an
+ initializer with a subgrouping for each command option.
+
+ Each subgrouping contains a string constant, that defines the
+ fixed part of the option name, and the address of a variable.
+ The variable, type `char *', is set to the variable part of the
+ given option if the fixed part matches. The actual option name
+ is made by appending `-m' to the specified name.
+
+ Here is an example which defines `-mshort-data-NUMBER'. If the
+ given option is `-mshort-data-512', the variable `m88k_short_data'
+ will be set to the string `"512"'.
+
+ extern char *m88k_short_data;
+ #define TARGET_OPTIONS { { "short-data-", &m88k_short_data } } */
+
+#define TARGET_OPTIONS \
+{ \
+ { "cpu=", &sparc_select[1].string }, \
+ { "tune=", &sparc_select[2].string }, \
+ { "cmodel=", &sparc_cmodel_string }, \
+ { "align-loops=", &sparc_align_loops_string }, \
+ { "align-jumps=", &sparc_align_jumps_string }, \
+ { "align-functions=", &sparc_align_funcs_string }, \
+ SUBTARGET_OPTIONS \
+}
+
+/* This is meant to be redefined in target specific files. */
+#define SUBTARGET_OPTIONS
+
+/* sparc_select[0] is reserved for the default cpu. */
+struct sparc_cpu_select
+{
+ char *string;
+ char *name;
+ int set_tune_p;
+ int set_arch_p;
+};
+
+extern struct sparc_cpu_select sparc_select[];
+
+/* Variables to record values the user passes. */
+extern char *sparc_align_loops_string;
+extern char *sparc_align_jumps_string;
+extern char *sparc_align_funcs_string;
+/* Parsed values as a power of two. */
+extern int sparc_align_loops;
+extern int sparc_align_jumps;
+extern int sparc_align_funcs;
+
+#define DEFAULT_SPARC_ALIGN_FUNCS \
+(sparc_cpu == PROCESSOR_ULTRASPARC ? 5 : 2)
+
+/* target machine storage layout */
+
+/* Define for cross-compilation to a sparc target with no TFmode from a host
+ with a different float format (e.g. VAX). */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Define this to set the endianness to use in libgcc2.c, which can
+ not depend on target_flags. */
+#if defined (__LITTLE_ENDIAN__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#endif
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD (TARGET_ARCH64 ? 64 : 32)
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_ARCH64 ? 8 : 4)
+#define MIN_UNITS_PER_WORD 4
+
+/* Now define the sizes of the C data types. */
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE (TARGET_ARCH64 ? 64 : 32)
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+
+#if defined (SPARC_BI_ARCH)
+#define MAX_LONG_TYPE_SIZE 64
+#endif
+
+#if 0
+/* ??? This does not work in SunOS 4.x, so it is not enabled here.
+ Instead, it is enabled in sol2.h, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE (TARGET_PTR64 ? 64 : 32)
+
+/* A macro to update MODE and UNSIGNEDP when an object whose type
+ is TYPE and which has the specified mode and signedness is to be
+ stored in a register. This macro is only called when TYPE is a
+ scalar type. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+if (TARGET_ARCH64 \
+ && GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+{ \
+ (MODE) = DImode; \
+}
+
+/* Define this macro if the promotion described by PROMOTE_MODE
+ should also be done for outgoing function arguments. */
+/* This is only needed for TARGET_ARCH64, but since PROMOTE_MODE is a no-op
+ for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime test
+ for this value. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define this macro if the promotion described by PROMOTE_MODE
+ should also be done for the return value of functions.
+ If this macro is defined, FUNCTION_VALUE must perform the same
+ promotions done by PROMOTE_MODE. */
+/* This is only needed for TARGET_ARCH64, but since PROMOTE_MODE is a no-op
+ for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime test
+ for this value. */
+#define PROMOTE_FUNCTION_RETURN
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY (TARGET_ARCH64 ? 64 : 32)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY (TARGET_ARCH64 ? 128 : 64)
+
+/* ALIGN FRAMES on double word boundaries */
+
+#define SPARC_STACK_ALIGN(LOC) \
+ (TARGET_ARCH64 ? (((LOC)+15) & ~15) : (((LOC)+7) & ~7))
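+/* As an illustration of the rounding above: on arch32 SPARC_STACK_ALIGN (20)
+   is (20+7)&~7 == 24, while on arch64 it is (20+15)&~15 == 32, keeping
+   frames aligned to 8 and 16 bytes respectively.  */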
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY (1 << (sparc_align_funcs + 3))
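+/* For example, sparc_align_funcs == 2 gives 1 << 5 == 32 bits (4 byte
+   alignment) and the UltraSPARC value of DEFAULT_SPARC_ALIGN_FUNCS (5)
+   gives 1 << 8 == 256 bits (32 bytes); the +3 converts a byte alignment
+   expressed as a power of two into bits.  */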
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY (TARGET_ARCH64 ? 64 : 32)
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT (TARGET_ARCH64 ? 128 : 64)
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 64
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Things that must be doubleword aligned cannot go in the text section,
+ because the linker fails to align the text section enough!
+ Put them in the data section. This macro is only used in this file. */
+#define MAX_TEXT_ALIGN 32
+
+/* This forces all variables and constants to the data section when PIC.
+ This is because the SunOS 4 shared library scheme thinks everything in
+ text is a function, and patches the address to point to a loader stub. */
+/* This is defined to zero for every system which doesn't use the a.out object
+ file format. */
+#ifndef SUNOS4_SHARED_LIBRARIES
+#define SUNOS4_SHARED_LIBRARIES 0
+#endif
+
+/* This is defined differently for v9 in a cover file. */
+#define SELECT_SECTION(T,RELOC) \
+{ \
+ if (TREE_CODE (T) == VAR_DECL) \
+ { \
+ if (TREE_READONLY (T) && ! TREE_SIDE_EFFECTS (T) \
+ && DECL_INITIAL (T) \
+ && (DECL_INITIAL (T) == error_mark_node \
+ || TREE_CONSTANT (DECL_INITIAL (T))) \
+ && DECL_ALIGN (T) <= MAX_TEXT_ALIGN \
+ && ! (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES))) \
+ text_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (T) == CONSTRUCTOR) \
+ { \
+ if (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES)) \
+ data_section (); \
+ } \
+ else if (TREE_CODE_CLASS (TREE_CODE (T)) == 'c') \
+ { \
+ if ((TREE_CODE (T) == STRING_CST && flag_writable_strings) \
+ || TYPE_ALIGN (TREE_TYPE (T)) > MAX_TEXT_ALIGN \
+ || (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES))) \
+ data_section (); \
+ else \
+ text_section (); \
+ } \
+}
+
+/* Use text section for a constant
+ unless we need more alignment than that offers. */
+/* This is defined differently for v9 in a cover file. */
+#define SELECT_RTX_SECTION(MODE, X) \
+{ \
+ if (GET_MODE_BITSIZE (MODE) <= MAX_TEXT_ALIGN \
+ && ! (flag_pic && (symbolic_operand (X) || SUNOS4_SHARED_LIBRARIES))) \
+ text_section (); \
+ else \
+ data_section (); \
+}
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ SPARC has 32 integer registers and 32 floating point registers.
+ 64 bit SPARC has 32 additional fp regs, but the odd numbered ones are not
+ accessible. We still account for them to simplify register computations
+   (e.g. in CLASS_MAX_NREGS). There are also 4 fp condition code registers, so
+ 32+32+32+4 == 100.
+ Register 100 is used as the integer condition code register. */
+
+#define FIRST_PSEUDO_REGISTER 101
+
+#define SPARC_FIRST_FP_REG 32
+/* Additional V9 fp regs. */
+#define SPARC_FIRST_V9_FP_REG 64
+#define SPARC_LAST_V9_FP_REG 95
+/* V9 %fcc[0123]. V8 uses (figuratively) %fcc0. */
+#define SPARC_FIRST_V9_FCC_REG 96
+#define SPARC_LAST_V9_FCC_REG 99
+/* V8 fcc reg. */
+#define SPARC_FCC_REG 96
+/* Integer CC reg. We don't distinguish %icc from %xcc. */
+#define SPARC_ICC_REG 100
+
+/* Nonzero if REGNO is an fp reg. */
+#define SPARC_FP_REG_P(REGNO) \
+((REGNO) >= SPARC_FIRST_FP_REG && (REGNO) <= SPARC_LAST_V9_FP_REG)
+
+/* Argument passing regs. */
+#define SPARC_OUTGOING_INT_ARG_FIRST 8
+#define SPARC_INCOMING_INT_ARG_FIRST (TARGET_FLAT ? 8 : 24)
+#define SPARC_FP_ARG_FIRST 32
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ On non-v9 systems:
+ g1 is free to use as temporary.
+ g2-g4 are reserved for applications. Gcc normally uses them as
+ temporaries, but this can be disabled via the -mno-app-regs option.
+ g5 through g7 are reserved for the operating system.
+
+ On v9 systems:
+ g1,g5 are free to use as temporaries, and are free to use between calls
+ if the call is to an external function via the PLT.
+ g4 is free to use as a temporary in the non-embedded case.
+ g4 is reserved in the embedded case.
+ g2-g3 are reserved for applications. Gcc normally uses them as
+ temporaries, but this can be disabled via the -mno-app-regs option.
+ g6-g7 are reserved for the operating system (or application in
+ embedded case).
+ ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
+ currently be a fixed register until this pattern is rewritten.
+ Register 1 is also used when restoring call-preserved registers in large
+ stack frames.
+
+ Registers fixed in arch32 and not arch64 (or vice-versa) are marked in
+ CONDITIONAL_REGISTER_USAGE in order to properly handle -ffixed-.
+*/
+
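+/* In the tables below the entries appear in hard register order:
+   0-7 are %g0-%g7, 8-15 %o0-%o7, 16-23 %l0-%l7, 24-31 %i0-%i7,
+   32-95 %f0-%f63, 96-99 %fcc0-%fcc3 and 100 %icc.  */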
+#define FIXED_REGISTERS \
+ {1, 0, 0, 0, 0, 0, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ 0, 0, 0, 0, 0}
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ \
+ 1, 1, 1, 1, 1}
+
+/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
+ they won't be allocated. */
+
+#define CONDITIONAL_REGISTER_USAGE \
+do \
+ { \
+ if (TARGET_ARCH32) \
+ { \
+ fixed_regs[5] = 1; \
+ } \
+ else \
+ { \
+ fixed_regs[1] = 1; \
+ } \
+ if (! TARGET_V9) \
+ { \
+ int regno; \
+ for (regno = SPARC_FIRST_V9_FP_REG; \
+ regno <= SPARC_LAST_V9_FP_REG; \
+ regno++) \
+ fixed_regs[regno] = 1; \
+ /* %fcc0 is used by v8 and v9. */ \
+ for (regno = SPARC_FIRST_V9_FCC_REG + 1; \
+ regno <= SPARC_LAST_V9_FCC_REG; \
+ regno++) \
+ fixed_regs[regno] = 1; \
+ } \
+ if (! TARGET_FPU) \
+ { \
+ int regno; \
+ for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++) \
+ fixed_regs[regno] = 1; \
+ } \
+ /* Don't unfix g2-g4 if they were fixed with -ffixed-. */ \
+ fixed_regs[2] |= ! TARGET_APP_REGS; \
+ fixed_regs[3] |= ! TARGET_APP_REGS; \
+ fixed_regs[4] |= ! TARGET_APP_REGS || TARGET_CM_EMBMEDANY; \
+ if (TARGET_FLAT) \
+ { \
+ /* Let the compiler believe the frame pointer is still \
+ %fp, but output it as %i7. */ \
+ fixed_regs[31] = 1; \
+ reg_names[FRAME_POINTER_REGNUM] = "%i7"; \
+ /* ??? This is a hack to disable leaf functions. */ \
+ global_regs[7] = 1; \
+ } \
+ if (profile_block_flag) \
+ { \
+ /* %g1 and %g2 must be fixed, because BLOCK_PROFILER \
+ uses them. */ \
+ fixed_regs[1] = 1; \
+ fixed_regs[2] = 1; \
+ } \
+ if (flag_pic != 0) \
+ { \
+ fixed_regs[23] = 1; \
+ call_used_regs[23] = 1; \
+ } \
+ } \
+while (0)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On SPARC, ordinary registers hold 32 bits worth;
+ this means both integer and floating point registers.
+ On v9, integer regs hold 64 bits worth; floating point regs hold
+ 32 bits worth (this includes the new fp regs as even the odd ones are
+ included in the hard register count). */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (TARGET_ARCH64 \
+ ? ((REGNO) < 32 \
+ ? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD \
+ : (GET_MODE_SIZE (MODE) + 3) / 4) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
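+/* For example, on arch64 a DFmode value (8 bytes) needs a single integer
+   register ((8 + 8 - 1) / 8 == 1) but two fp registers ((8 + 3) / 4 == 2),
+   matching the register widths described above.  */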
+
+/* A subreg in 64 bit mode will have the wrong offset for a floating point
+ register. The least significant part is at offset 1, compared to 0 for
+ integer registers. */
+#define ALTER_HARD_SUBREG(TMODE, WORD, FMODE, REGNO) \
+ (TARGET_ARCH64 && (REGNO) >= 32 && (REGNO) < 96 && (TMODE) == SImode ? 1 : ((REGNO) + (WORD)))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ See sparc.c for how we initialize this. */
+extern int *hard_regno_mode_classes;
+extern int sparc_mode_class[];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0)
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output.
+
+ For V9: SFmode can't be combined with other float modes, because they can't
+ be allocated to the %d registers. Also, DFmode won't fit in odd %f
+ registers, but SFmode will. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) \
+ || (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2) \
+ && (! TARGET_V9 \
+ || (GET_MODE_CLASS (MODE1) != MODE_FLOAT \
+ || (MODE1 != SFmode && MODE2 != SFmode)))))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* SPARC pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 14
+
+/* Actual top-of-stack address is 92/176 greater than the contents of the
+ stack pointer register for !v9/v9. That is:
+ - !v9: 64 bytes for the in and local registers, 4 bytes for structure return
+ address, and 6*4 bytes for the 6 register parameters.
+ - v9: 128 bytes for the in and local registers + 6*8 bytes for the integer
+ parameter regs. */
+#define STACK_POINTER_OFFSET FIRST_PARM_OFFSET(0)
+
+/* The stack bias (the amount by which the hardware register is offset). */
+#define SPARC_STACK_BIAS ((TARGET_ARCH64 && TARGET_STACK_BIAS) ? 2047 : 0)
+
+/* Is stack biased? */
+#define STACK_BIAS SPARC_STACK_BIAS
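+/* For example, when generating 64 bit code with -mstack-bias (the default
+   for v9, see above) this is 2047, so a slot at logical offset 0 from the
+   stack pointer really lives at [%sp + 2047]; for 32 bit code the bias is
+   0 and no adjustment is needed.  */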
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 30
+
+#if 0
+/* Register that is used for the return address for the flat model. */
+#define RETURN_ADDR_REGNUM 15
+#endif
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c.
+ Used in flow.c, global.c, and reload1.c.
+
+ Being a non-leaf function does not mean a frame pointer is needed in the
+ flat window model. However, the debugger won't be able to backtrace through
+   us without it. */
+#define FRAME_POINTER_REQUIRED \
+ (TARGET_FLAT ? (current_function_calls_alloca || current_function_varargs \
+ || !leaf_function_p ()) \
+ : ! (leaf_function_p () && only_leaf_regs_used ()))
+
+/* C statement to store the difference between the frame pointer
+ and the stack pointer values immediately after the function prologue.
+
+ Note, we always pretend that this is a leaf function because if
+ it's not, there's no point in trying to eliminate the
+ frame pointer. If it is a leaf function, we guessed right! */
+#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
+ ((VAR) = (TARGET_FLAT ? sparc_flat_compute_frame_size (get_frame_size ()) \
+ : compute_frame_size (get_frame_size (), 1)))
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM
+
+/* Register in which static-chain is passed to a function. This must
+ not be a register used by the prologue. */
+#define STATIC_CHAIN_REGNUM (TARGET_ARCH64 ? 5 : 2)
+
+/* Register which holds offset table for position-independent
+ data references. */
+
+#define PIC_OFFSET_TABLE_REGNUM 23
+
+#define INITIALIZE_PIC initialize_pic ()
+#define FINALIZE_PIC finalize_pic ()
+
+/* Pick a default value we can notice from override_options:
+ !v9: Default is on.
+ v9: Default is off. */
+
+#define DEFAULT_PCC_STRUCT_RETURN -1
+
+/* Sparc ABI says that quad-precision floats and all structures are returned
+ in memory.
+ For v9: unions <= 32 bytes in size are returned in int regs,
+ structures up to 32 bytes are returned in int and fp regs. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+(TARGET_ARCH32 \
+ ? (TYPE_MODE (TYPE) == BLKmode \
+ || TYPE_MODE (TYPE) == TFmode \
+ || TYPE_MODE (TYPE) == TCmode) \
+ : (TYPE_MODE (TYPE) == BLKmode \
+ && int_size_in_bytes (TYPE) > 32))
+
+/* Functions which return large structures get the address
+ to place the wanted value at offset 64 from the frame.
+ Must reserve 64 bytes for the in and local registers.
+ v9: Functions which return large structures get the address to place the
+ wanted value from an invisible first argument. */
+/* Used only in other #defines in this file. */
+#define STRUCT_VALUE_OFFSET 64
+
+#define STRUCT_VALUE \
+ (TARGET_ARCH64 \
+ ? 0 \
+ : gen_rtx_MEM (Pmode, \
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, \
+ GEN_INT (STRUCT_VALUE_OFFSET))))
+#define STRUCT_VALUE_INCOMING \
+ (TARGET_ARCH64 \
+ ? 0 \
+ : gen_rtx_MEM (Pmode, \
+ gen_rtx_PLUS (Pmode, frame_pointer_rtx, \
+ GEN_INT (STRUCT_VALUE_OFFSET))))
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The SPARC has various kinds of registers: general, floating point,
+ and condition codes [well, it has others as well, but none that we
+ care directly about].
+
+ For v9 we must distinguish between the upper and lower floating point
+ registers because the upper ones can't hold SFmode values.
+ HARD_REGNO_MODE_OK won't help here because reload assumes that register(s)
+ satisfying a group need for a class will also satisfy a single need for
+ that class. EXTRA_FP_REGS is a bit of a misnomer as it covers all 64 fp
+ regs.
+
+ It is important that one class contains all the general and all the standard
+ fp regs. Otherwise find_reg() won't properly allocate int regs for moves,
+ because reg_class_record() will bias the selection in favor of fp regs,
+ because reg_class_subunion[GENERAL_REGS][FP_REGS] will yield FP_REGS,
+ because FP_REGS > GENERAL_REGS.
+
+ It is also important that one class contain all the general and all the
+ fp regs. Otherwise when spilling a DFmode reg, it may be from EXTRA_FP_REGS
+ but find_reloads() may use class GENERAL_OR_FP_REGS. This will cause
+ allocate_reload_reg() to bypass it causing an abort because the compiler
+ thinks it doesn't have a spill reg when in fact it does.
+
+ v9 also has 4 floating point condition code registers. Since we don't
+ have a class that is the union of FPCC_REGS with either of the others,
+ it is important that it appear first. Otherwise the compiler will die
+ trying to compile _fixunsdfsi because fix_truncdfsi2 won't match its
+ constraints.
+
+ It is important that SPARC_ICC_REG have class NO_REGS. Otherwise combine
+ may try to use it to hold an SImode value. See register_operand.
+ ??? Should %fcc[0123] be handled similarly?
+*/
+
+enum reg_class { NO_REGS, FPCC_REGS, I64_REGS, GENERAL_REGS, FP_REGS,
+ EXTRA_FP_REGS, GENERAL_OR_FP_REGS, GENERAL_OR_EXTRA_FP_REGS,
+ ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "FPCC_REGS", "I64_REGS", "GENERAL_REGS", "FP_REGS", \
+ "EXTRA_FP_REGS", "GENERAL_OR_FP_REGS", "GENERAL_OR_EXTRA_FP_REGS", \
+ "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
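+/* A note on reading the initializers below, assuming 32 bit set elements as
+   the four-element groups suggest: each group follows the enum reg_class
+   order, lowest hard registers in the first element.  For instance
+   {0xffff, 0, 0, 0} (I64_REGS) covers the global and out registers 0-15,
+   and {0, 0, 0, 0xf} (FPCC_REGS) covers the four %fcc registers 96-99.  */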
+#define REG_CLASS_CONTENTS \
+ {{0, 0, 0, 0}, {0, 0, 0, 0xf}, {0xffff, 0, 0, 0}, \
+ {-1, 0, 0, 0}, {0, -1, 0, 0}, {0, -1, -1, 0}, \
+ {-1, -1, 0, 0}, {-1, -1, -1, 0}, {-1, -1, -1, 0x1f}}
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+extern enum reg_class sparc_regno_reg_class[];
+
+#define REGNO_REG_CLASS(REGNO) sparc_regno_reg_class[(REGNO)]
+
+/* This is the order in which to allocate registers normally.
+
+ We put %f0/%f1 last among the float registers, so as to make it more
+ likely that a pseudo-register which dies in the float return register
+ will get allocated to the float return register, thus saving a move
+ instruction at the end of the function. */
+
+#define REG_ALLOC_ORDER \
+{ 8, 9, 10, 11, 12, 13, 2, 3, \
+ 15, 16, 17, 18, 19, 20, 21, 22, \
+ 23, 24, 25, 26, 27, 28, 29, 31, \
+ 34, 35, 36, 37, 38, 39, /* %f2-%f7 */ \
+ 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
+ 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
+ 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
+ 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
+ 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
+ 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
+ 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
+ 32, 33, /* %f0,%f1 */ \
+ 96, 97, 98, 99, 100, /* %fcc0-3, %icc */ \
+ 1, 4, 5, 6, 7, 0, 14, 30}
+
+/* This is the order in which to allocate registers for
+ leaf functions. If all registers can fit in the "i" registers,
+ then we have the possibility of having a leaf function. */
+
+#define REG_LEAF_ALLOC_ORDER \
+{ 2, 3, 24, 25, 26, 27, 28, 29, \
+ 15, 8, 9, 10, 11, 12, 13, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 34, 35, 36, 37, 38, 39, \
+ 40, 41, 42, 43, 44, 45, 46, 47, \
+ 48, 49, 50, 51, 52, 53, 54, 55, \
+ 56, 57, 58, 59, 60, 61, 62, 63, \
+ 64, 65, 66, 67, 68, 69, 70, 71, \
+ 72, 73, 74, 75, 76, 77, 78, 79, \
+ 80, 81, 82, 83, 84, 85, 86, 87, \
+ 88, 89, 90, 91, 92, 93, 94, 95, \
+ 32, 33, \
+ 96, 97, 98, 99, 100, \
+ 1, 4, 5, 6, 7, 0, 14, 30, 31}
+
+#define ORDER_REGS_FOR_LOCAL_ALLOC order_regs_for_local_alloc ()
+
+/* ??? %g7 is marked as not a leaf register in order to effectively #undef
+   LEAF_REGISTERS when -mflat is used.  Function only_leaf_regs_used will
+   return 0 if a global register is used and is not permitted in a leaf
+   function.  We make %g7 a global reg if -mflat and voila.  Since %g7 is a
+   system register and is fixed it won't be used by gcc anyway.  */
+
+#define LEAF_REGISTERS \
+{ 1, 1, 1, 1, 1, 1, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1}
+
+extern char leaf_reg_remap[];
+#define LEAF_REG_REMAP(REGNO) (leaf_reg_remap[REGNO])
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Local macro to handle the two v9 classes of FP regs. */
+#define FP_REG_CLASS_P(CLASS) ((CLASS) == FP_REGS || (CLASS) == EXTRA_FP_REGS)
+
+/* Get reg_class from a letter such as appears in the machine description.
+ In the not-v9 case, coerce v9's 'e' class to 'f', so we can use 'e' in the
+ .md file for v8 and v9.
+ 'd' and 'b' are used for single and double precision VIS operations,
+ if TARGET_VIS.
+ 'h' is used for V8+ 64 bit global and out registers. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+(TARGET_V9 \
+ ? ((C) == 'f' ? FP_REGS \
+ : (C) == 'e' ? EXTRA_FP_REGS \
+ : (C) == 'c' ? FPCC_REGS \
+ : ((C) == 'd' && TARGET_VIS) ? FP_REGS\
+ : ((C) == 'b' && TARGET_VIS) ? EXTRA_FP_REGS\
+ : ((C) == 'h' && TARGET_V8PLUS) ? I64_REGS\
+ : NO_REGS) \
+ : ((C) == 'f' ? FP_REGS \
+ : (C) == 'e' ? FP_REGS \
+ : (C) == 'c' ? FPCC_REGS \
+ : NO_REGS))
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ `I' is used for the range of constants an insn can actually contain.
+ `J' is used for the range which is just zero (since that is R0).
+ `K' is used for constants which can be loaded with a single sethi insn.
+ `L' is used for the range of constants supported by the movcc insns.
+ `M' is used for the range of constants supported by the movrcc insns. */
+
+#define SPARC_SIMM10_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x200 < 0x400)
+#define SPARC_SIMM11_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x400 < 0x800)
+#define SPARC_SIMM13_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x1000 < 0x2000)
+/* 10 and 11 bit immediates are only used for a few specific insns.
+ SMALL_INT is used throughout the port so we continue to use it. */
+#define SMALL_INT(X) (SPARC_SIMM13_P (INTVAL (X)))
+/* 13 bit immediate, considering only the low 32 bits */
+#define SMALL_INT32(X) (SPARC_SIMM13_P ((int)INTVAL (X) & 0xffffffff))
+#define SPARC_SETHI_P(X) \
+(((unsigned HOST_WIDE_INT) (X) & ~(unsigned HOST_WIDE_INT) 0xfffffc00) == 0)
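+/* A worked example of the checks above, assuming HOST_WIDE_INT is at least
+   32 bits wide: the unsigned addition wraps for negative values, so
+   SPARC_SIMM13_P accepts exactly -4096..4095:
+     SPARC_SIMM13_P (-4096): (unsigned) -4096 + 0x1000 == 0,      in range
+     SPARC_SIMM13_P (4095):  0xfff  + 0x1000 == 0x1fff,           in range
+     SPARC_SIMM13_P (4096):  0x1000 + 0x1000 == 0x2000,           rejected
+   SPARC_SETHI_P accepts values with the low 10 bits clear (and, when
+   HOST_WIDE_INT is 64 bits, which also fit in 32 bits), i.e. anything a
+   single sethi can load.  */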
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? SPARC_SIMM13_P (VALUE) \
+ : (C) == 'J' ? (VALUE) == 0 \
+ : (C) == 'K' ? SPARC_SETHI_P (VALUE) \
+ : (C) == 'L' ? SPARC_SIMM11_P (VALUE) \
+ : (C) == 'M' ? SPARC_SIMM10_P (VALUE) \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? fp_zero_operand (VALUE) \
+ : (C) == 'H' ? arith_double_operand (VALUE, DImode) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+/* We can't load constants into FP registers. We can't load any FP constant
+ if an 'E' constraint fails to match it. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (CONSTANT_P (X) \
+ && (FP_REG_CLASS_P (CLASS) \
+ || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && (HOST_FLOAT_FORMAT != IEEE_FLOAT_FORMAT \
+ || HOST_BITS_PER_INT != BITS_PER_WORD))) \
+ ? NO_REGS : (CLASS))
+
+/* Return the register class of a scratch register needed to load IN into
+ a register of class CLASS in MODE.
+
+ On the SPARC, when PIC, we need a temporary when loading some addresses
+ into a register.
+
+ Also, we need a temporary when loading/storing a HImode/QImode value
+ between memory and the FPU registers. This can happen when combine puts
+ a paradoxical subreg in a float/fix conversion insn. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, IN) \
+ ((FP_REG_CLASS_P (CLASS) && ((MODE) == HImode || (MODE) == QImode) \
+ && (GET_CODE (IN) == MEM \
+ || ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
+ && true_regnum (IN) == -1))) ? GENERAL_REGS : NO_REGS)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, IN) \
+ ((FP_REG_CLASS_P (CLASS) && ((MODE) == HImode || (MODE) == QImode) \
+ && (GET_CODE (IN) == MEM \
+ || ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
+ && true_regnum (IN) == -1))) ? GENERAL_REGS : NO_REGS)
+
+/* On SPARC it is not possible to directly move data between
+ GENERAL_REGS and FP_REGS. */
+#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
+ (FP_REG_CLASS_P (CLASS1) != FP_REG_CLASS_P (CLASS2))
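+/* So, for example, copying an SImode pseudo between an integer register and
+   %f0 is done by reload through a stack slot (see
+   SECONDARY_MEMORY_NEEDED_RTX below) rather than by a direct move.  */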
+
+/* Return the stack location to use for secondary memory needed reloads.
+ We want to use the reserved location just below the frame pointer.
+ However, we must ensure that there is a frame, so use assign_stack_local
+ if the frame size is zero. */
+#define SECONDARY_MEMORY_NEEDED_RTX(MODE) \
+ (get_frame_size () == 0 \
+ ? assign_stack_local (MODE, GET_MODE_SIZE (MODE), 0) \
+ : gen_rtx_MEM (MODE, gen_rtx_PLUS (Pmode, frame_pointer_rtx, \
+ GEN_INT (STARTING_FRAME_OFFSET))))
+
+/* Get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
+ because the movsi and movsf patterns don't handle r/f moves.
+ For v8 we copy the default definition. */
+#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \
+ (TARGET_ARCH64 \
+ ? (GET_MODE_BITSIZE (MODE) < 32 \
+ ? mode_for_size (32, GET_MODE_CLASS (MODE), 0) \
+ : MODE) \
+ : (GET_MODE_BITSIZE (MODE) < BITS_PER_WORD \
+ ? mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (MODE), 0) \
+ : MODE))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On SPARC, this is the size of MODE in words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (FP_REG_CLASS_P (CLASS) ? (GET_MODE_SIZE (MODE) + 3) / 4 \
+ : (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define the number of registers that can hold parameters.
+ This macro is only used in other macro definitions below and in sparc.c.
+ MODE is the mode of the argument.
+ !v9: All args are passed in %o0-%o5.
+ v9: %o0-%o5 and %f0-%f31 are cumulatively used to pass values.
+ See the description in sparc.c. */
+#define NPARM_REGS(MODE) \
+(TARGET_ARCH64 \
+ ? (GET_MODE_CLASS (MODE) == MODE_FLOAT ? 32 : 6) \
+ : 6)
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+/* This allows space for one TFmode floating point value. */
+#define STARTING_FRAME_OFFSET \
+ (TARGET_ARCH64 ? (SPARC_STACK_BIAS - 16) \
+ : (-SPARC_STACK_ALIGN (LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)))
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On SPARC, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Offset of first parameter from the argument pointer register value.
+ !v9: This is 64 for the ins and locals, plus 4 for the struct-return reg
+ even if this function isn't going to use it.
+ v9: This is 128 for the ins and locals. */
+#define FIRST_PARM_OFFSET(FNDECL) \
+ (TARGET_ARCH64 ? (SPARC_STACK_BIAS + 16 * UNITS_PER_WORD) \
+ : (STRUCT_VALUE_OFFSET + UNITS_PER_WORD))
+
+/* When a parameter is passed in a register, stack space is still
+ allocated for it.
+ !v9: All 6 possible integer registers have backing store allocated.
+ v9: Only space for the arguments passed is allocated. */
+/* ??? Ideally, we'd use zero here (as the minimum), but zero has special
+ meaning to the backend. Further, we need to be able to detect if a
+   varargs/unprototyped function is called, as it may need to spill more
+   registers than we've provided space for.  Ugly, ugly.  So for now we retain
+ all 6 slots even for v9. */
+#define REG_PARM_STACK_SPACE(DECL) (6 * UNITS_PER_WORD)
+
+/* Keep the stack pointer constant throughout the function.
+ This is both an optimization and a necessity: longjmp
+ doesn't behave itself when the stack pointer moves within
+ the function! */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Some subroutine macros specific to this machine.
+ When !TARGET_FPU, put float return values in the general registers,
+ since we don't have any fp registers. */
+#define BASE_RETURN_VALUE_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 : 8) \
+ : (((MODE) == SFmode || (MODE) == DFmode) && TARGET_FPU ? 32 : 8))
+
+#define BASE_OUTGOING_VALUE_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 \
+ : TARGET_FLAT ? 8 : 24) \
+ : (((MODE) == SFmode || (MODE) == DFmode) && TARGET_FPU ? 32 \
+ : (TARGET_FLAT ? 8 : 24)))
+
+#define BASE_PASSING_ARG_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 : 8) \
+ : 8)
+
+/* ??? FIXME -- seems wrong for v9 structure passing... */
+#define BASE_INCOMING_ARG_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 \
+ : TARGET_FLAT ? 8 : 24) \
+ : (TARGET_FLAT ? 8 : 24))
+
+/* Define this macro if the target machine has "register windows". This
+ C expression returns the register number as seen by the called function
+ corresponding to register number OUT as seen by the calling function.
+ Return OUT if register number OUT is not an outbound register. */
+
+#define INCOMING_REGNO(OUT) \
+ ((TARGET_FLAT || (OUT) < 8 || (OUT) > 15) ? (OUT) : (OUT) + 16)
+
+/* Define this macro if the target machine has "register windows". This
+ C expression returns the register number as seen by the calling function
+ corresponding to register number IN as seen by the called function.
+ Return IN if register number IN is not an inbound register. */
+
+#define OUTGOING_REGNO(IN) \
+ ((TARGET_FLAT || (IN) < 24 || (IN) > 31) ? (IN) : (IN) - 16)
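+/* For example, with register windows the caller's %o0 (hard reg 8) becomes
+   the callee's %i0 (hard reg 24), so INCOMING_REGNO (8) == 24 and
+   OUTGOING_REGNO (24) == 8; with -mflat both macros are the identity.  */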
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+/* On SPARC the value is found in the first "output" register. */
+
+extern struct rtx_def *function_value ();
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ function_value ((VALTYPE), TYPE_MODE (VALTYPE), 1)
+
+/* But the called function leaves it in the first "input" register. */
+
+#define FUNCTION_OUTGOING_VALUE(VALTYPE, FUNC) \
+ function_value ((VALTYPE), TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ function_value (NULL_TREE, (MODE), 1)
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+ On SPARC, the first "output" reg is used for integer values,
+ and the first floating point register is used for floating point values. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 8 || (N) == 32)
+
+/* Define the size of space to allocate for the return value of an
+ untyped_call. */
+
+#define APPLY_RESULT_SIZE 16
+
+/* 1 if N is a possible register number for function argument passing.
+ On SPARC, these are the "output" registers. v9 also uses %f0-%f31. */
+
+#define FUNCTION_ARG_REGNO_P(N) \
+(TARGET_ARCH64 \
+ ? (((N) >= 8 && (N) <= 13) || ((N) >= 32 && (N) <= 63)) \
+ : ((N) >= 8 && (N) <= 13))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On SPARC (!v9), this is a single integer, which is a number of words
+ of arguments scanned so far (including the invisible argument,
+ if any, which holds the structure-value-address).
+ Thus 7 or more means all following args should go on the stack.
+
+ For v9, we also need to know whether a prototype is present. */
+
+struct sparc_args {
+ int words; /* number of words passed so far */
+ int prototype_p; /* non-zero if a prototype is present */
+ int libcall_p; /* non-zero if a library call */
+};
+#define CUMULATIVE_ARGS struct sparc_args
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+extern void init_cumulative_args ();
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+init_cumulative_args (& (CUM), (FNTYPE), (LIBNAME), (INDIRECT));
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ TYPE is null for libcalls where that information may not be available. */
+
+extern void function_arg_advance ();
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+function_arg_advance (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+extern struct rtx_def *function_arg ();
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+function_arg (& (CUM), (MODE), (TYPE), (NAMED), 0)
+
+/* Define where a function finds its arguments.
+ This is different from FUNCTION_ARG because of register windows. */
+
+#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \
+function_arg (& (CUM), (MODE), (TYPE), (NAMED), 1)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+extern int function_arg_partial_nregs ();
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+function_arg_partial_nregs (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* A C expression that indicates when an argument must be passed by reference.
+ If nonzero for an argument, a copy of that argument is made in memory and a
+ pointer to the argument is passed instead of the argument itself.
+ The pointer is passed in whatever way is appropriate for passing a pointer
+ to that type. */
+
+extern int function_arg_pass_by_reference ();
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+function_arg_pass_by_reference (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* If defined, a C expression which determines whether, and in which direction,
+ to pad out an argument with extra space. The value should be of type
+ `enum direction': either `upward' to pad above the argument,
+ `downward' to pad below, or `none' to inhibit padding. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+function_arg_padding ((MODE), (TYPE))
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments.
+ For sparc64, objects requiring 16 byte alignment are passed that way. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+((TARGET_ARCH64 \
+ && (GET_MODE_ALIGNMENT (MODE) == 128 \
+ || ((TYPE) && TYPE_ALIGN (TYPE) == 128))) \
+ ? 128 : PARM_BOUNDARY)
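+/* E.g. a 16 byte aligned TFmode long double is passed on a 128 bit boundary
+   on arch64 (assuming GET_MODE_ALIGNMENT (TFmode) == 128 there); everything
+   else uses PARM_BOUNDARY, i.e. 64 bits on arch64 and 32 bits on arch32.  */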
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *sparc_compare_op0, *sparc_compare_op1;
+
+/* Define the function that build the compare insn for scc and bcc. */
+
+extern struct rtx_def *gen_compare_reg ();
+
+/* This function handles all v9 scc insns */
+
+extern int gen_v9_scc ();
+
+/* Generate the special assembly code needed to tell the assembler whatever
+ it might need to know about the return value of a function.
+
+ For Sparc assemblers, we need to output a .proc pseudo-op which conveys
+ information to the assembler relating to peephole optimization (done in
+ the assembler). */
+
+#define ASM_DECLARE_RESULT(FILE, RESULT) \
+ fprintf ((FILE), "\t.proc\t0%lo\n", sparc_type_code (TREE_TYPE (RESULT)))
+
+/* Output the label for a function definition. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+do { \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+} while (0)
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+/* On SPARC, move-double insns between fpu and cpu need an 8-byte block
+ of memory. If any fpu reg is used in the function, we allocate
+ such a block here, at the bottom of the frame, just in case it's needed.
+
+ If this function is a leaf procedure, then we may choose not
+ to do a "save" insn. The decision about whether or not
+ to do this is made in regclass.c. */
+
+extern int leaf_function;
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+ (TARGET_FLAT ? sparc_flat_output_function_prologue (FILE, (int)SIZE) \
+ : output_function_prologue (FILE, (int)SIZE, leaf_function))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry.
+
+ 32 bit sparc uses %g2 as the STATIC_CHAIN_REGNUM which gets clobbered
+ during profiling so we need to save/restore it around the call to mcount.
+ We're guaranteed that a save has just been done, and we use the space
+ allocated for intreg/fpreg value passing. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ char buf[20]; \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", (LABELNO)); \
+ if (! TARGET_ARCH64) \
+ fputs ("\tst %g2,[%fp-4]\n", FILE); \
+ fputs ("\tsethi %hi(", FILE); \
+ assemble_name (FILE, buf); \
+ fputs ("),%o0\n", FILE); \
+ fputs ("\tcall mcount\n\tadd %o0,%lo(", FILE); \
+ assemble_name (FILE, buf); \
+ fputs ("),%o0\n", FILE); \
+ if (! TARGET_ARCH64) \
+ fputs ("\tld [%fp-4],%g2\n", FILE); \
+ } while (0)
+
+/* There are three profiling modes for basic blocks available.
+ The modes are selected at compile time by using the options
+ -a or -ax of the gnu compiler.
+ The variable `profile_block_flag' will be set according to the
+ selected option.
+
+ profile_block_flag == 0, no option used:
+
+ No profiling done.
+
+ profile_block_flag == 1, -a option used.
+
+ Count frequency of execution of every basic block.
+
+ profile_block_flag == 2, -ax option used.
+
+ Generate code to allow several different profiling modes at run time.
+ Available modes are:
+ Produce a trace of all basic blocks.
+ Count frequency of jump instructions executed.
+ In every mode it is possible to start profiling upon entering
+ certain functions and to disable profiling of some other functions.
+
+ The result of basic-block profiling will be written to a file `bb.out'.
+ If the -ax option is used parameters for the profiling will be read
+ from file `bb.in'.
+
+*/
+
+/* The following macro shall output assembler code to FILE
+ to initialize basic-block profiling.
+
+ If profile_block_flag == 2
+
+ Output code to call the subroutine `__bb_init_trace_func'
+ and pass two parameters to it. The first parameter is
+ the address of a block allocated in the object module.
+ The second parameter is the number of the first basic block
+ of the function.
+
+ The name of the block is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ The number of the first basic block of the function is
+ passed to the macro in BLOCK_OR_LABEL.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ parameter1 <- LPBX0
+ parameter2 <- BLOCK_OR_LABEL
+ call __bb_init_trace_func
+
+ else if profile_block_flag != 0
+
+ Output code to call the subroutine `__bb_init_func'
+ and pass one single parameter to it, which is the same
+ as the first parameter to `__bb_init_trace_func'.
+
+ The first word of this parameter is a flag which will be nonzero if
+ the object module has already been initialized. So test this word
+ first, and do not call `__bb_init_func' if the flag is nonzero.
+ Note: When profile_block_flag == 2 the test need not be done
+ but `__bb_init_trace_func' *must* be called.
+
+ BLOCK_OR_LABEL may be used to generate a label number as a
+ branch destination in case `__bb_init_func' will not be called.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ cmp (LPBX0),0
+ jne local_label
+ parameter1 <- LPBX0
+ call __bb_init_func
+local_label:
+
+*/
+
+#define FUNCTION_BLOCK_PROFILER(FILE, BLOCK_OR_LABEL) \
+do \
+ { \
+ int bol = (BLOCK_OR_LABEL); \
+ switch (profile_block_flag) \
+ { \
+ case 2: \
+ fprintf (FILE, "\tsethi %%hi(LPBX0),%%o0\n\tor %%o0,%%lo(LPBX0),%%o0\n\tsethi %%hi(%d),%%o1\n\tcall ___bb_init_trace_func\n\tor %%o1,%%lo(%d),%%o1\n",\
+ bol, bol); \
+ break; \
+ default: \
+ fprintf (FILE, "\tsethi %%hi(LPBX0),%%o0\n\tld [%%lo(LPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne LPY%d\n\tadd %%o0,%%lo(LPBX0),%%o0\n\tcall ___bb_init_func\n\tnop\nLPY%d:\n",\
+ bol, bol); \
+ break; \
+ } \
+ } \
+while (0)
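+
+/* For illustration, in the default (profile_block_flag == 1) case with
+   BLOCK_OR_LABEL == 7, the macro above would emit roughly:
+
+	sethi %hi(LPBX0),%o0
+	ld [%lo(LPBX0)+%o0],%o1
+	tst %o1
+	bne LPY7
+	add %o0,%lo(LPBX0),%o0
+	call ___bb_init_func
+	nop
+LPY7:  */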
+
+/* The following macro shall output assembler code to FILE
+ to increment a counter associated with basic block number BLOCKNO.
+
+ If profile_block_flag == 2
+
+ Output code to initialize the global structure `__bb' and
+ call the function `__bb_trace_func' which will increment the
+ counter.
+
+ `__bb' consists of two words. In the first word the number
+ of the basic block has to be stored. In the second word
+ the address of a block allocated in the object module
+ has to be stored.
+
+ The basic block number is given by BLOCKNO.
+
+ The address of the block is given by the label created with
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ by FUNCTION_BLOCK_PROFILER.
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+   If described in a virtual assembler language, the code to be
+ output looks like:
+
+ move BLOCKNO -> (__bb)
+ move LPBX0 -> (__bb+4)
+ call __bb_trace_func
+
+   Note that the function `__bb_trace_func' must not change the
+   machine state, especially the flag register.  To guarantee
+   this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE
+ and MACHINE_STATE_RESTORE. The last two macros will be
+ used in the function `__bb_trace_func', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE.
+
+ else if profile_block_flag != 0
+
+ Output code to increment the counter directly.
+ Basic blocks are numbered separately from zero within each
+ compiled object module. The count associated with block number
+ BLOCKNO is at index BLOCKNO in an array of words; the name of
+ this array is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ If described in a virtual assembler language, the code to be
+ output looks like:
+
+ inc (LPBX2+4*BLOCKNO)
+
+*/
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+do \
+ { \
+ int blockn = (BLOCKNO); \
+ switch (profile_block_flag) \
+ { \
+ case 2: \
+ fprintf (FILE, "\tsethi %%hi(___bb),%%g1\n\tsethi %%hi(%d),%%g2\n\tor %%g2,%%lo(%d),%%g2\n\tst %%g2,[%%lo(___bb)+%%g1]\n\tsethi %%hi(LPBX0),%%g2\n\tor %%g2,%%lo(LPBX0),%%g2\n\tadd 4,%%g1,%%g1\n\tst %%g2,[%%lo(___bb)+%%g1]\n\tmov %%o7,%%g2\n\tcall ___bb_trace_func\n\tnop\n\tmov %%g2,%%o7\n",\
+ blockn, blockn); \
+ break; \
+ default: \
+ fprintf (FILE, "\tsethi %%hi(LPBX2+%d),%%g1\n\tld [%%lo(LPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(LPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+ break; \
+ } \
+ } \
+while(0)
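+
+/* For illustration, in the profile_block_flag == 1 case with BLOCKNO == 3
+   (so the counter lives at offset 4 * 3 = 12 from LPBX2), the macro above
+   would emit roughly:
+
+	sethi %hi(LPBX2+12),%g1
+	ld [%lo(LPBX2+12)+%g1],%g2
+	add %g2,1,%g2
+	st %g2,[%lo(LPBX2+12)+%g1]  */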
+
+/* The following macro shall output assembler code to FILE
+ to indicate a return from function during basic-block profiling.
+
+   If profile_block_flag == 2:
+
+ Output assembler code to call function `__bb_trace_ret'.
+
+   Note that the function `__bb_trace_ret' must not change the
+   machine state, especially the flag register.  To guarantee
+   this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE_RET
+ and MACHINE_STATE_RESTORE_RET. The last two macros will be
+ used in the function `__bb_trace_ret', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE_RET.
+
+   else if profile_block_flag != 0:
+
+ The macro will not be used, so it need not distinguish
+ these cases.
+*/
+
+#define FUNCTION_BLOCK_PROFILER_EXIT(FILE) \
+ fprintf (FILE, "\tcall ___bb_trace_ret\n\tnop\n" );
+
+/* The function `__bb_trace_func' is called in every basic block
+   and is not allowed to change the machine state.  Saving (restoring)
+   the state can either be done in the BLOCK_PROFILER macro,
+   before calling (resp. after returning from) the function
+   `__bb_trace_func', or it can be done inside the function by
+ defining the macros:
+
+ MACHINE_STATE_SAVE(ID)
+ MACHINE_STATE_RESTORE(ID)
+
+   In the latter case, care must be taken that the prologue code
+ of function `__bb_trace_func' does not already change the
+ state prior to saving it with MACHINE_STATE_SAVE.
+
+ The parameter `ID' is a string identifying a unique macro use.
+
+ On sparc it is sufficient to save the psw register to memory.
+ Unfortunately the psw register can be read in supervisor mode only,
+ so we read only the condition codes by using branch instructions
+ and hope that this is enough. */
+
+#define MACHINE_STATE_SAVE(ID) \
+ int ms_flags, ms_saveret; \
+ asm volatile( \
+ "mov %%g0,%0\n\
+ be,a LFLGNZ"ID"\n\
+ or %0,4,%0\n\
+LFLGNZ"ID":\n\
+ bcs,a LFLGNC"ID"\n\
+ or %0,1,%0\n\
+LFLGNC"ID":\n\
+ bvs,a LFLGNV"ID"\n\
+ or %0,2,%0\n\
+LFLGNV"ID":\n\
+ bneg,a LFLGNN"ID"\n\
+ or %0,8,%0\n\
+LFLGNN"ID":\n\
+ mov %%g2,%1" \
+ : "=r"(ms_flags), "=r"(ms_saveret));
+
+/* On sparc MACHINE_STATE_RESTORE restores the psw register from memory.
+ The psw register can be written in supervisor mode only,
+ which is true even for simple condition codes.
+ We use some combination of instructions to produce the
+   proper condition codes, but some flag combinations cannot
+ be generated in this way. If this happens an unimplemented
+ instruction will be executed to abort the program. */
+
+#define MACHINE_STATE_RESTORE(ID) \
+{ extern char flgtab[] __asm__("LFLGTAB"ID); \
+ int scratch; \
+ asm volatile ( \
+ "jmpl %2+%1,%%g0\n\
+ ! Do part of VC in the delay slot here, as it needs 3 insns.\n\
+ addcc 2,%3,%%g0\n\
+LFLGTAB" ID ":\n\
+ ! 0\n\
+ ba LFLGRET"ID"\n\
+ orcc 1,%%g0,%%g0\n\
+ ! C\n\
+ ba LFLGRET"ID"\n\
+ addcc 2,%3,%%g0\n\
+ ! V\n\
+ unimp\n\
+ nop\n\
+ ! VC\n\
+ ba LFLGRET"ID"\n\
+ addxcc %4,%4,%0\n\
+ ! Z\n\
+ ba LFLGRET"ID"\n\
+ subcc %%g0,%%g0,%%g0\n\
+ ! ZC\n\
+ ba LFLGRET"ID"\n\
+ addcc 1,%3,%0\n\
+ ! ZVC\n\
+ ba LFLGRET"ID"\n\
+ addcc %4,%4,%0\n\
+ ! N\n\
+ ba LFLGRET"ID"\n\
+ orcc %%g0,-1,%%g0\n\
+ ! NC\n\
+ ba LFLGRET"ID"\n\
+ addcc %%g0,%3,%%g0\n\
+ ! NV\n\
+ unimp\n\
+ nop\n\
+ ! NVC\n\
+ unimp\n\
+ nop\n\
+ ! NZ\n\
+ unimp\n\
+ nop\n\
+ ! NZC\n\
+ unimp\n\
+ nop\n\
+ ! NZV\n\
+ unimp\n\
+ nop\n\
+ ! NZVC\n\
+ unimp\n\
+ nop\n\
+LFLGRET"ID":\n\
+ mov %5,%%g2" \
+ : "=r"(scratch) \
+ : "r"(ms_flags*8), "r"(flgtab), "r"(-1), \
+ "r"(0x80000000), "r"(ms_saveret) \
+ : "cc", "%g2"); }
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+extern int current_function_calls_alloca;
+extern int current_function_outgoing_args_size;
+
+#define EXIT_IGNORE_STACK \
+ (get_frame_size () != 0 \
+ || current_function_calls_alloca || current_function_outgoing_args_size)
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning. */
+
+/* This declaration is needed due to traditional/ANSI
+ incompatibilities which cannot be #ifdefed away
+ because they occur inside of macros. Sigh. */
+extern union tree_node *current_function_decl;
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+ (TARGET_FLAT ? sparc_flat_output_function_epilogue (FILE, (int)SIZE) \
+ : output_function_epilogue (FILE, (int)SIZE, leaf_function))
+
+#define DELAY_SLOTS_FOR_EPILOGUE \
+ (TARGET_FLAT ? sparc_flat_epilogue_delay_slots () : 1)
+#define ELIGIBLE_FOR_EPILOGUE_DELAY(trial, slots_filled) \
+ (TARGET_FLAT ? sparc_flat_eligible_for_epilogue_delay (trial, slots_filled) \
+ : eligible_for_epilogue_delay (trial, slots_filled))
+
+/* Define registers used by the epilogue and return instruction. */
+#define EPILOGUE_USES(REGNO) \
+ (!TARGET_FLAT && REGNO == 31)
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE (TARGET_ARCH64 ? 32 : 16)
+
+#define TRAMPOLINE_ALIGNMENT 128 /* 16 bytes */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+void sparc_initialize_trampoline ();
+void sparc64_initialize_trampoline ();
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ if (TARGET_ARCH64) \
+ sparc64_initialize_trampoline (TRAMP, FNADDR, CXT); \
+ else \
+ sparc_initialize_trampoline (TRAMP, FNADDR, CXT)
+
+/* Generate necessary RTL for __builtin_saveregs().
+ ARGLIST is the argument list; see expr.c. */
+
+extern struct rtx_def *sparc_builtin_saveregs ();
+#define EXPAND_BUILTIN_SAVEREGS(ARGLIST) sparc_builtin_saveregs (ARGLIST)
+
+/* Define this macro if the location where a function argument is passed
+ depends on whether or not it is a named argument.
+
+ This macro controls how the NAMED argument to FUNCTION_ARG
+ is set for varargs and stdarg functions. With this macro defined,
+ the NAMED argument is always true for named arguments, and false for
+ unnamed arguments. If this is not defined, but SETUP_INCOMING_VARARGS
+ is defined, then all arguments are treated as named. Otherwise, all named
+ arguments except the last are treated as named.
+ For the v9 we want NAMED to mean what it says it means. */
+
+#define STRICT_ARGUMENT_NAMING TARGET_V9
+
+/* Generate RTL to flush the register windows so as to make arbitrary frames
+ available. */
+#define SETUP_FRAME_ADDRESSES() \
+ emit_insn (gen_flush_register_windows ())
+
+/* Given an rtx for the address of a frame,
+ return an rtx for the address of the word in the frame
+ that holds the dynamic chain--the previous frame's address.
+ ??? -mflat support? */
+#define DYNAMIC_CHAIN_ADDRESS(frame) \
+ gen_rtx_PLUS (Pmode, frame, GEN_INT (14 * UNITS_PER_WORD))
+
+/* The return address isn't on the stack, it is in a register, so we can't
+ access it from the current frame pointer. We can access it from the
+ previous frame pointer though by reading a value from the register window
+ save area. */
+#define RETURN_ADDR_IN_PREVIOUS_FRAME
+
+/* This is the offset of the return address to the true next instruction to be
+ executed for the current function. */
+#define RETURN_ADDR_OFFSET \
+ (8 + 4 * (! TARGET_ARCH64 && current_function_returns_struct))
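+
+/* For example, a normal 32-bit call returns to %o7 + 8 (skipping the call
+   and its delay slot); when the callee returns a structure, the SPARC
+   convention places an unimp word after the delay slot, so the true
+   return point is %o7 + 12. */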
+
+/* The current return address is in %i7. The return address of anything
+ farther back is in the register window save area at [%fp+60]. */
+/* ??? This ignores the fact that the actual return address is +8 for normal
+ returns, and +12 for structure returns. */
+#define RETURN_ADDR_RTX(count, frame) \
+ ((count == -1) \
+ ? gen_rtx_REG (Pmode, 31) \
+ : gen_rtx_MEM (Pmode, \
+ memory_address (Pmode, plus_constant (frame, 15 * UNITS_PER_WORD))))
+
+/* Before the prologue, the return address is %o7 + 8. OK, sometimes it's
+ +12, but always using +8 is close enough for frame unwind purposes.
+ Actually, just using %o7 is close enough for unwinding, but %o7+8
+ is something you can return to. */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_PLUS (word_mode, gen_rtx_REG (word_mode, 15), GEN_INT (8))
+
+/* The offset from the incoming value of %sp to the top of the stack frame
+ for the current function. On sparc64, we have to account for the stack
+ bias if present. */
+#define INCOMING_FRAME_SP_OFFSET SPARC_STACK_BIAS
+
+#define DOESNT_NEED_UNWINDER (! TARGET_FLAT)
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
+#define REGNO_OK_FOR_FP_P(REGNO) \
+ (((unsigned) (REGNO) - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)) \
+ || ((unsigned) reg_renumber[REGNO] - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)))
+#define REGNO_OK_FOR_CCFP_P(REGNO) \
+ (TARGET_V9 \
+ && (((unsigned) (REGNO) - 96 < (unsigned)4) \
+ || ((unsigned) reg_renumber[REGNO] - 96 < (unsigned)4)))
+
+/* Now macros that check whether X is a register and also,
+ strictly, whether it is in a specified class.
+
+ These macros are specific to the SPARC, and may be used only
+ in code for printing assembler insns and in conditions for
+ define_optimization. */
+
+/* 1 if X is an fp register. */
+
+#define FP_REG_P(X) (REG_P (X) && REGNO_OK_FOR_FP_P (REGNO (X)))
+
+/* Is X, a REG, an in or global register? i.e. is regno 0..7 or 24..31 */
+#define IN_OR_GLOBAL_P(X) (REGNO (X) < 8 || (REGNO (X) >= 24 && REGNO (X) <= 31))
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address.
+ When PIC, we do not accept an address that would require a scratch reg
+ to load into a register. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == HIGH \
+ || (GET_CODE (X) == CONST \
+ && ! (flag_pic && pic_address_needs_scratch (X))))
+
+/* Define this, so that when PIC, reload won't try to reload invalid
+ addresses which require two reload registers. */
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! pic_address_needs_scratch (X))
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ Anything can be made to work except floating point constants. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) != CONST_DOUBLE || GET_MODE (X) == VOIDmode)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+/* Optional extra constraints for this machine. Borrowed from romp.h.
+
+ For the SPARC, `Q' means that this is a memory operand but not a
+ symbolic memory operand. Note that an unassigned pseudo register
+ is such a memory operand. Needed because reload will generate
+ these things in insns and then not re-recognize the insns, causing
+ constrain_operands to fail.
+
+ `S' handles constraints for calls. ??? So where is it? */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+
+/* 'T', 'U' are for aligned memory loads which aren't needed for v9. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' \
+ ? ((GET_CODE (OP) == MEM \
+ && memory_address_p (GET_MODE (OP), XEXP (OP, 0)) \
+ && ! symbolic_memory_operand (OP, VOIDmode)) \
+ || (reload_in_progress && GET_CODE (OP) == REG \
+ && REGNO (OP) >= FIRST_PSEUDO_REGISTER)) \
+ : (! TARGET_ARCH64 && (C) == 'T') \
+ ? (mem_aligned_8 (OP)) \
+ : (! TARGET_ARCH64 && (C) == 'U') \
+ ? (register_ok_for_ldd (OP)) \
+ : 0)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' \
+ ? (GET_CODE (OP) == REG \
+ ? (REGNO (OP) >= FIRST_PSEUDO_REGISTER \
+ && reg_renumber[REGNO (OP)] < 0) \
+ : GET_CODE (OP) == MEM) \
+ : (! TARGET_ARCH64 && (C) == 'T') \
+ ? mem_aligned_8 (OP) && strict_memory_address_p (Pmode, XEXP (OP, 0)) \
+ : (! TARGET_ARCH64 && (C) == 'U') \
+ ? (GET_CODE (OP) == REG \
+ && (REGNO (OP) < FIRST_PSEUDO_REGISTER \
+ || reg_renumber[REGNO (OP)] >= 0) \
+ && register_ok_for_ldd (OP)) \
+ : 0)
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
+ ordinarily. This changes a bit when generating PIC.
+
+ If you change this, execute "rm explow.o recog.o reload.o". */
+
+#define RTX_OK_FOR_BASE_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_BASE_P (SUBREG_REG (X))))
+
+#define RTX_OK_FOR_INDEX_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_INDEX_P (SUBREG_REG (X))))
+
+#define RTX_OK_FOR_OFFSET_P(X) \
+ (GET_CODE (X) == CONST_INT && INTVAL (X) >= -0x1000 && INTVAL (X) < 0x1000)
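+
+/* That is, the offset must fit the 13-bit signed immediate field of the
+   SPARC load/store instructions: -4096 <= INTVAL (X) <= 4095. */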
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (RTX_OK_FOR_BASE_P (X)) \
+ goto ADDR; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ register rtx op0 = XEXP (X, 0); \
+ register rtx op1 = XEXP (X, 1); \
+ if (flag_pic && op0 == pic_offset_table_rtx) \
+ { \
+ if (RTX_OK_FOR_BASE_P (op1)) \
+ goto ADDR; \
+ else if (flag_pic == 1 \
+ && GET_CODE (op1) != REG \
+ && GET_CODE (op1) != LO_SUM \
+ && GET_CODE (op1) != MEM \
+ && (GET_CODE (op1) != CONST_INT \
+ || SMALL_INT (op1))) \
+ goto ADDR; \
+ } \
+ else if (RTX_OK_FOR_BASE_P (op0)) \
+ { \
+ if (RTX_OK_FOR_INDEX_P (op1) \
+ || RTX_OK_FOR_OFFSET_P (op1)) \
+ goto ADDR; \
+ } \
+ else if (RTX_OK_FOR_BASE_P (op1)) \
+ { \
+ if (RTX_OK_FOR_INDEX_P (op0) \
+ || RTX_OK_FOR_OFFSET_P (op0)) \
+ goto ADDR; \
+ } \
+ } \
+ else if (GET_CODE (X) == LO_SUM) \
+ { \
+ register rtx op0 = XEXP (X, 0); \
+ register rtx op1 = XEXP (X, 1); \
+ if (RTX_OK_FOR_BASE_P (op0) \
+ && CONSTANT_P (op1) \
+ /* We can't allow TFmode, because an offset \
+ greater than or equal to the alignment (8) \
+ may cause the LO_SUM to overflow. */ \
+ && MODE != TFmode) \
+ goto ADDR; \
+ } \
+ else if (GET_CODE (X) == CONST_INT && SMALL_INT (X)) \
+ goto ADDR; \
+}
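+
+/* For example, under the rules above [%l0+%l1] (REG+REG), [%fp-4]
+   (REG+SMALLINT) and [%l0+%lo(var)] (LO_SUM) are legitimate addresses,
+   while [%l0+8192] is not, since 8192 does not fit the 13-bit
+   immediate field. */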
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output. */
+
+/* On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ rtx sparc_x = (X); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == MULT) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 1), \
+ force_operand (XEXP (X, 0), NULL_RTX)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == MULT) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), NULL_RTX)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, force_operand (XEXP (X, 0), NULL_RTX),\
+ XEXP (X, 1)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), NULL_RTX)); \
+ if (sparc_x != (X) && memory_address_p (MODE, X)) \
+ goto WIN; \
+ if (flag_pic) (X) = legitimize_pic_address (X, MODE, 0); \
+ else if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ copy_to_mode_reg (Pmode, XEXP (X, 1))); \
+ else if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 0))) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 1), \
+ copy_to_mode_reg (Pmode, XEXP (X, 0))); \
+ else if (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == CONST \
+ || GET_CODE (X) == LABEL_REF) \
+ (X) = gen_rtx_LO_SUM (Pmode, \
+ copy_to_mode_reg (Pmode, gen_rtx_HIGH (Pmode, X)), X); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; }
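+
+/* For example, when not generating PIC a bare SYMBOL_REF address is
+   rewritten above as (lo_sum (reg) (symbol_ref)), with the register
+   loaded from (high (symbol_ref)); i.e. the usual
+   sethi %hi(sym) / %lo(sym) pair. */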
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the SPARC this is never true. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+/* If we are referencing a function make the SYMBOL_REF special.
+ In the Embedded Medium/Anywhere code model, %g4 points to the data segment
+ so we must not add it to function addresses. */
+
+#define ENCODE_SECTION_INFO(DECL) \
+ do { \
+ if (TARGET_CM_EMBMEDANY && TREE_CODE (DECL) == FUNCTION_DECL) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1; \
+ } while (0)
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+#if 0 /* Sun 4 has matherr, so this is no good. */
+/* This is the value of the error code EDOM for this machine,
+ used by the sqrt instruction. */
+#define TARGET_EDOM 33
+
+/* This is how to refer to the variable errno. */
+#define GEN_ERRNO_RTX \
+ gen_rtx_MEM (SImode, gen_rtx_SYMBOL_REF (Pmode, "errno"))
+#endif /* 0 */
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+
+#define STORE_FLAG_VALUE 1
+
+/* When a prototype says `char' or `short', really pass an `int'. */
+#define PROMOTE_PROTOTYPES
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_PTR64 ? DImode : SImode)
+
+/* Generate calls to memcpy, memcmp and memset. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Add any extra modes needed to represent the condition code.
+
+ On the Sparc, we have a "no-overflow" mode which is used when an add or
+ subtract insn is used to set the condition code. Different branches are
+ used in this case for some operations.
+
+ We also have two modes to indicate that the relevant condition code is
+ in the floating-point condition code register. One for comparisons which
+ will generate an exception if the result is unordered (CCFPEmode) and
+ one for comparisons which will never trap (CCFPmode).
+
+ CCXmode and CCX_NOOVmode are only used by v9. */
+
+#define EXTRA_CC_MODES CCXmode, CC_NOOVmode, CCX_NOOVmode, CCFPmode, CCFPEmode
+
+/* Define the names for the modes specified above. */
+
+#define EXTRA_CC_NAMES "CCX", "CC_NOOV", "CCX_NOOV", "CCFP", "CCFPE"
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. For floating-point,
+ CCFP[E]mode is used. CC_NOOVmode should be used when the first operand is a
+ PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
+ processing is needed. */
+#define SELECT_CC_MODE(OP,X,Y) \
+ (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ ? ((OP == EQ || OP == NE) ? CCFPmode : CCFPEmode) \
+ : ((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS \
+ || GET_CODE (X) == NEG || GET_CODE (X) == ASHIFT) \
+ ? (TARGET_ARCH64 && GET_MODE (X) == DImode ? CCX_NOOVmode : CC_NOOVmode) \
+ : ((TARGET_ARCH64 || TARGET_V8PLUS) && GET_MODE (X) == DImode ? CCXmode : CCmode)))
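+
+/* For example, comparing the result of an SImode PLUS against zero yields
+   CC_NOOVmode; an EQ or NE floating-point comparison yields CCFPmode,
+   while an ordered comparison such as GT yields CCFPEmode; and a DImode
+   comparison under TARGET_ARCH64 or TARGET_V8PLUS yields CCXmode. */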
+
+/* Return non-zero if SELECT_CC_MODE will never return MODE for a
+ floating point inequality comparison. */
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE SImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* alloca should avoid clobbering the old register save area. */
+#define SETJMP_VIA_SAVE_AREA
+
+/* Define subroutines to call to handle multiply and divide.
+ Use the subroutines that Sun's library provides.
+ The `*' prevents an underscore from being prepended by the compiler. */
+
+#define DIVSI3_LIBCALL "*.div"
+#define UDIVSI3_LIBCALL "*.udiv"
+#define MODSI3_LIBCALL "*.rem"
+#define UMODSI3_LIBCALL "*.urem"
+/* .umul is a little faster than .mul. */
+#define MULSI3_LIBCALL "*.umul"
+
+/* Define library calls for quad FP operations. These are all part of the
+ SPARC ABI. */
+#define ADDTF3_LIBCALL "_Q_add"
+#define SUBTF3_LIBCALL "_Q_sub"
+#define NEGTF2_LIBCALL "_Q_neg"
+#define MULTF3_LIBCALL "_Q_mul"
+#define DIVTF3_LIBCALL "_Q_div"
+#define FLOATSITF2_LIBCALL "_Q_itoq"
+#define FIX_TRUNCTFSI2_LIBCALL "_Q_qtoi"
+#define FIXUNS_TRUNCTFSI2_LIBCALL "_Q_qtou"
+#define EXTENDSFTF2_LIBCALL "_Q_stoq"
+#define TRUNCTFSF2_LIBCALL "_Q_qtos"
+#define EXTENDDFTF2_LIBCALL "_Q_dtoq"
+#define TRUNCTFDF2_LIBCALL "_Q_qtod"
+#define EQTF2_LIBCALL "_Q_feq"
+#define NETF2_LIBCALL "_Q_fne"
+#define GTTF2_LIBCALL "_Q_fgt"
+#define GETF2_LIBCALL "_Q_fge"
+#define LTTF2_LIBCALL "_Q_flt"
+#define LETF2_LIBCALL "_Q_fle"
+
+/* We can define the TFmode sqrt optab only if TARGET_FPU. This is because
+ with soft-float, the SFmode and DFmode sqrt instructions will be absent,
+ and the compiler will notice and try to use the TFmode sqrt instruction
+ for calls to the builtin function sqrt, but this fails. */
+#define INIT_TARGET_OPTABS \
+ do { \
+ add_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, ADDTF3_LIBCALL); \
+ sub_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, SUBTF3_LIBCALL); \
+ neg_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, NEGTF2_LIBCALL); \
+ smul_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, MULTF3_LIBCALL); \
+ flodiv_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, DIVTF3_LIBCALL); \
+ eqtf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EQTF2_LIBCALL); \
+ netf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, NETF2_LIBCALL); \
+ gttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GTTF2_LIBCALL); \
+ getf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GETF2_LIBCALL); \
+ lttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LTTF2_LIBCALL); \
+ letf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LETF2_LIBCALL); \
+ trunctfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFSF2_LIBCALL); \
+ trunctfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFDF2_LIBCALL); \
+ extendsftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDSFTF2_LIBCALL); \
+ extenddftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDDFTF2_LIBCALL); \
+ floatsitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, FLOATSITF2_LIBCALL); \
+ fixtfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, FIX_TRUNCTFSI2_LIBCALL); \
+ fixunstfsi_libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, FIXUNS_TRUNCTFSI2_LIBCALL); \
+ if (TARGET_FPU) \
+ sqrt_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_Q_sqrt"); \
+ INIT_SUBTARGET_OPTABS; \
+ } while (0)
+
+/* This is meant to be redefined in the host-dependent files.  */
+#define INIT_SUBTARGET_OPTABS
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ if (INTVAL (RTX) < 0x1000 && INTVAL (RTX) >= -0x1000) \
+ return 0; \
+ case HIGH: \
+ return 2; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 4; \
+ case CONST_DOUBLE: \
+ if (GET_MODE (RTX) == DImode) \
+ if ((XINT (RTX, 3) == 0 \
+ && (unsigned) XINT (RTX, 2) < 0x1000) \
+ || (XINT (RTX, 3) == -1 \
+ && XINT (RTX, 2) < 0 \
+ && XINT (RTX, 2) >= -0x1000)) \
+ return 0; \
+ return 8;
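+
+/* Note that a CONST_INT outside the signed 13-bit range falls through to
+   the HIGH case above and so costs 2. */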
+
+/* Compute the cost of an address. For the sparc, all valid addresses are
+ the same cost. */
+
+#define ADDRESS_COST(RTX) 1
+
+/* Compute extra cost of moving data between one register class
+ and another. */
+#define GENERAL_OR_I64(C) ((C) == GENERAL_REGS || (C) == I64_REGS)
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((FP_REG_CLASS_P (CLASS1) && GENERAL_OR_I64 (CLASS2)) \
+ || (GENERAL_OR_I64 (CLASS1) && FP_REG_CLASS_P (CLASS2)) \
+ || (CLASS1) == FPCC_REGS || (CLASS2) == FPCC_REGS) \
+ ? (sparc_cpu == PROCESSOR_ULTRASPARC ? 12 : 6) : 2)
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. The purpose for the cost of MULT is to encourage
+ `synth_mult' to find a synthetic multiply when reasonable.
+
+ If we need more than 12 insns to do a multiply, then go out-of-line,
+ since the call overhead will be < 10% of the cost of the multiply. */
+
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case MULT: \
+ return TARGET_HARD_MUL ? COSTS_N_INSNS (5) : COSTS_N_INSNS (25); \
+ case DIV: \
+ case UDIV: \
+ case MOD: \
+ case UMOD: \
+ return COSTS_N_INSNS (25); \
+ /* Make FLOAT and FIX more expensive than CONST_DOUBLE,\
+ so that cse will favor the latter. */ \
+ case FLOAT: \
+ case FIX: \
+ return 19;
+
+#define ISSUE_RATE sparc_issue_rate()
+
+/* Adjust the cost of dependencies. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ if (sparc_cpu == PROCESSOR_SUPERSPARC) \
+ (COST) = supersparc_adjust_cost (INSN, LINK, DEP, COST); \
+ else if (sparc_cpu == PROCESSOR_ULTRASPARC) \
+ (COST) = ultrasparc_adjust_cost (INSN, LINK, DEP, COST); \
+ else
+
+/* Conditional branches with empty delay slots have a length of two. */
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+ if (GET_CODE (INSN) == CALL_INSN \
+ || (GET_CODE (INSN) == JUMP_INSN && ! simplejump_p (insn))) \
+ LENGTH += 1; else
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(file)
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+
+#define ASM_COMMENT_START "!"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* ??? Try to make the style consistent here (_OP?). */
+
+#define ASM_LONGLONG ".xword"
+#define ASM_LONG ".word"
+#define ASM_SHORT ".half"
+#define ASM_BYTE_OP ".byte"
+#define ASM_FLOAT ".single"
+#define ASM_DOUBLE ".double"
+#define ASM_LONGDOUBLE ".xxx" /* ??? Not known (or used yet). */
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{"%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7", \
+ "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7", \
+ "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7", \
+ "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7", \
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7", \
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15", \
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23", \
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31", \
+ "%f32", "%f33", "%f34", "%f35", "%f36", "%f37", "%f38", "%f39", \
+ "%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47", \
+ "%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55", \
+ "%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63", \
+ "%fcc0", "%fcc1", "%fcc2", "%fcc3", "%icc"}
+
+/* Define additional names for use in asm clobbers and asm declarations. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{{"ccr", SPARC_ICC_REG}, {"cc", SPARC_ICC_REG}}
+
+/* How to renumber registers for dbx and gdb. In the flat model, the frame
+ pointer is really %i7. */
+
+#define DBX_REGISTER_NUMBER(REGNO) \
+ (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+
+/* On Sun 4, this limit is 2048. We use 1000 to be safe, since the length
+ can run past this up to a continuation point. Once we used 1500, but
+ a single entry in C++ can run more than 500 bytes, due to the length of
+ mangled symbol names. dbxout.c should really be fixed to do
+ continuations when they are actually needed instead of trying to
+ guess... */
+#define DBX_CONTIN_LENGTH 1000
+
+/* This is how to output a note to DBX telling it the line number
+ to which the following sequence of instructions corresponds.
+
+ This is needed for SunOS 4.0, and should not hurt for 3.2
+ versions either. */
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+ { static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,LM%d\nLM%d:\n", \
+ line, sym_lineno, sym_lineno); \
+ sym_lineno += 1; }
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.global ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*%s%ld", (PREFIX), (long)(NUM))
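+
+/* For example, PREFIX "LP" and NUM 5 produce the string "*LP5"; the
+   leading `*' tells assemble_name to emit the name verbatim rather than
+   prepending USER_LABEL_PREFIX. */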
+
+/* This is how to output an assembler line defining a `float' constant.
+ We always have to use a .long pseudo-op to do this because the native
+ SVR4 ELF assembler is buggy and it generates incorrect values when we
+ try to use the .float pseudo-op instead. */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ { \
+ long t; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t, \
+ ASM_COMMENT_START, str); \
+ } \
+
+/* This is how to output an assembler line defining a `double' constant.
+ We always have to use a .long pseudo-op to do this because the native
+ SVR4 ELF assembler is buggy and it generates incorrect values when we
+ try to use the .float pseudo-op instead. */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ { \
+ long t[2]; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t[0], \
+ ASM_COMMENT_START, str); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[1]); \
+ }
+
+/* This is how to output an assembler line defining a `long double'
+ constant. */
+
+#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) \
+ { \
+ long t[4]; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t[0], \
+ ASM_COMMENT_START, str); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[1]); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[2]); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[3]); \
+ }
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_LONG), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line defining a DImode constant. */
+#define ASM_OUTPUT_DOUBLE_INT(FILE,VALUE) \
+ output_double_int (FILE, VALUE)
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_SHORT), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_BYTE_OP), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t%s\t0x%x\n", ASM_BYTE_OP, (VALUE))
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ if (Pmode == SImode) \
+ fprintf (FILE, "\t.word\t"); \
+ else \
+ fprintf (FILE, "\t.xword\t"); \
+ assemble_name (FILE, label); \
+ fputc ('\n', FILE); \
+} while (0)
+
+/* This is how to output an element of a case-vector that is relative.
+ (SPARC uses such vectors only when generating PIC.) */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+do { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ if (Pmode == SImode) \
+ fprintf (FILE, "\t.word\t"); \
+ else \
+ fprintf (FILE, "\t.xword\t"); \
+ assemble_name (FILE, label); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", (REL)); \
+ fputc ('-', FILE); \
+ assemble_name (FILE, label); \
+ fputc ('\n', FILE); \
+} while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (1<<(LOG)))
+
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) (sparc_align_jumps)
+
+#define LOOP_ALIGN(LABEL) (sparc_align_loops)
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs ("\t.common ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u,\"bss\"\n", (SIZE)))
+
+/* This says how to output an assembler line to define a local common
+ symbol. */
+
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGNED) \
+( fputs ("\t.reserve ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u,\"bss\",%u\n", \
+ (SIZE), ((ALIGNED) / BITS_PER_UNIT)))
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ do { \
+ fputs (".globl ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs ("\n", (FILE)); \
+ ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
+ } while (0)
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+#define IDENT_ASM_OP ".ident"
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int big_delta = (DELTA) >= 4096 || (DELTA) < -4096; \
+ if (big_delta) \
+ fprintf (FILE, "\tset %d,%%g1\n\tadd %%o0,%%g1,%%o0\n", (DELTA)); \
+ /* Don't use the jmp solution unless we know the target is local to \
+ the application or shared object. \
+ XXX: Wimp out and don't actually check anything except if this is \
+ an embedded target where we assume there are no shared libs. */ \
+ if (!TARGET_CM_EMBMEDANY || flag_pic) \
+ { \
+ if (! big_delta) \
+ fprintf (FILE, "\tadd %%o0,%d,%%o0\n", DELTA); \
+ fprintf (FILE, "\tmov %%o7,%%g1\n"); \
+ fprintf (FILE, "\tcall "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ",0\n"); \
+ } \
+ else if (TARGET_CM_EMBMEDANY) \
+ { \
+ fprintf (FILE, "\tsetx "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ",%%g5,%%g1\n\tjmp %%g1\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tsethi %%hi("); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, "),%%g1\n\tjmp %%g1+%%lo("); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ")\n"); \
+ } \
+ if (!TARGET_CM_EMBMEDANY || flag_pic) \
+ fprintf (FILE, "\tmov %%g1,%%o7\n"); \
+ else if (big_delta) \
+ fprintf (FILE, "\tnop\n"); \
+ else \
+ fprintf (FILE, "\tadd %%o0,%d,%%o0\n", DELTA); \
+} while (0)
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+ ((CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR) == '(' || (CHAR) == '_')
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+{ register rtx base, index = 0; \
+ int offset = 0; \
+ register rtx addr = ADDR; \
+ if (GET_CODE (addr) == REG) \
+ fputs (reg_names[REGNO (addr)], FILE); \
+ else if (GET_CODE (addr) == PLUS) \
+ { \
+ if (GET_CODE (XEXP (addr, 0)) == CONST_INT) \
+ offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);\
+ else if (GET_CODE (XEXP (addr, 1)) == CONST_INT) \
+ offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);\
+ else \
+ base = XEXP (addr, 0), index = XEXP (addr, 1); \
+ fputs (reg_names[REGNO (base)], FILE); \
+ if (index == 0) \
+ fprintf (FILE, "%+d", offset); \
+ else if (GET_CODE (index) == REG) \
+ fprintf (FILE, "+%s", reg_names[REGNO (index)]); \
+ else if (GET_CODE (index) == SYMBOL_REF \
+ || GET_CODE (index) == LABEL_REF \
+ || GET_CODE (index) == CONST) \
+ fputc ('+', FILE), output_addr_const (FILE, index); \
+ else abort (); \
+ } \
+ else if (GET_CODE (addr) == MINUS \
+ && GET_CODE (XEXP (addr, 1)) == LABEL_REF) \
+ { \
+ output_addr_const (FILE, XEXP (addr, 0)); \
+ fputs ("-(", FILE); \
+ output_addr_const (FILE, XEXP (addr, 1)); \
+ fputs ("-.)", FILE); \
+ } \
+ else if (GET_CODE (addr) == LO_SUM) \
+ { \
+ output_operand (XEXP (addr, 0), 0); \
+ fputs ("+%lo(", FILE); \
+ output_address (XEXP (addr, 1)); \
+ fputc (')', FILE); \
+ } \
+ else if (flag_pic && GET_CODE (addr) == CONST \
+ && GET_CODE (XEXP (addr, 0)) == MINUS \
+ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST \
+ && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS \
+ && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx) \
+ { \
+ addr = XEXP (addr, 0); \
+ output_addr_const (FILE, XEXP (addr, 0)); \
+	      /* Group the args of the second CONST in parentheses.  */ \
+ fputs ("-(", FILE); \
+ /* Skip past the second CONST--it does nothing for us. */\
+ output_addr_const (FILE, XEXP (XEXP (addr, 1), 0)); \
+ /* Close the parenthesis. */ \
+ fputc (')', FILE); \
+ } \
+ else \
+ { \
+ output_addr_const (FILE, addr); \
+ } \
+}
+
+/* Define the codes that are matched by predicates in sparc.c. */
+
+#define PREDICATE_CODES \
+{"reg_or_0_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
+{"fp_zero_operand", {CONST_DOUBLE}}, \
+{"intreg_operand", {SUBREG, REG}}, \
+{"fcc_reg_operand", {REG}}, \
+{"icc_or_fcc_reg_operand", {REG}}, \
+{"restore_operand", {REG}}, \
+{"call_operand", {MEM}}, \
+{"call_operand_address", {SYMBOL_REF, LABEL_REF, CONST, CONST_DOUBLE, ADDRESSOF, \
+ SUBREG, REG, PLUS, LO_SUM, CONST_INT}}, \
+{"symbolic_operand", {SYMBOL_REF, LABEL_REF, CONST, CONST_DOUBLE}}, \
+{"symbolic_memory_operand", {SUBREG, MEM}}, \
+{"label_ref_operand", {LABEL_REF}}, \
+{"sp64_medium_pic_operand", {CONST}}, \
+{"data_segment_operand", {SYMBOL_REF, PLUS, CONST}}, \
+{"text_segment_operand", {LABEL_REF, SYMBOL_REF, PLUS, CONST}}, \
+{"reg_or_nonsymb_mem_operand", {SUBREG, REG, MEM}}, \
+{"sparc_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, MEM}}, \
+{"move_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE, MEM}}, \
+{"splittable_symbolic_memory_operand", {MEM}}, \
+{"splittable_immediate_memory_operand", {MEM}}, \
+{"eq_or_neq", {EQ, NE}}, \
+{"normal_comp_operator", {GE, GT, LE, LT, GTU, LEU}}, \
+{"noov_compare_op", {NE, EQ, GE, GT, LE, LT, GEU, GTU, LEU, LTU}}, \
+{"v9_regcmp_op", {EQ, NE, GE, LT, LE, GT}}, \
+{"v8plus_regcmp_op", {EQ, NE}}, \
+{"extend_op", {SIGN_EXTEND, ZERO_EXTEND}}, \
+{"cc_arithop", {AND, IOR, XOR}}, \
+{"cc_arithopn", {AND, IOR}}, \
+{"arith_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith11_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith10_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"arith11_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"arith10_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"small_int", {CONST_INT, CONSTANT_P_RTX}}, \
+{"uns_small_int", {CONST_INT, CONSTANT_P_RTX}}, \
+{"uns_arith_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+{"clobbered_register", {REG}},
+
+
+/* The number of Pmode words for the setjmp buffer. */
+#define JMP_BUF_SIZE 12
+
+#define DONT_ACCESS_GBLS_AFTER_EPILOGUE (flag_pic)
+
+/* Declare functions defined in sparc.c and used in templates. */
+
+extern char *doublemove_string ();
+extern char *output_block_move ();
+extern char *output_cbranch ();
+extern char *output_fp_move_double ();
+extern char *output_fp_move_quad ();
+extern char *output_move_double ();
+extern char *output_move_quad ();
+extern char *output_return ();
+extern char *output_scc_insn ();
+extern char *output_v9branch ();
+extern char *singlemove_string ();
+
+extern void emit_v9_brxx_insn ();
+extern void finalize_pic ();
+extern void order_regs_for_local_alloc ();
+extern void output_double_int ();
+extern void output_function_epilogue ();
+extern void output_function_prologue ();
+extern void print_operand ();
+extern void sparc_flat_output_function_epilogue ();
+extern void sparc_flat_output_function_prologue ();
+
+extern int addrs_ok_for_ldd_peep ();
+extern int arith10_double_operand ();
+extern int arith10_operand ();
+extern int arith11_double_operand ();
+extern int arith11_operand ();
+extern int arith_double_operand ();
+extern int arith_operand ();
+extern int call_operand_address ();
+extern int cc_arithop ();
+extern int cc_arithopn ();
+extern int check_pic ();
+extern int compute_frame_size ();
+extern int data_segment_operand ();
+extern int eligible_for_epilogue_delay ();
+extern int eligible_for_return_delay ();
+extern int emit_move_sequence ();
+extern int extend_op ();
+extern int fcc_reg_operand ();
+extern int fp_zero_operand ();
+extern int icc_or_fcc_reg_operand ();
+extern int label_ref_operand ();
+extern int mem_aligned_8 ();
+extern int move_operand ();
+extern int noov_compare_op ();
+extern int pic_address_needs_scratch ();
+extern int reg_or_0_operand ();
+extern int reg_or_nonsymb_mem_operand ();
+extern int reg_unused_after ();
+extern int register_ok_for_ldd ();
+extern int registers_ok_for_ldd_peep ();
+extern int restore_operand ();
+extern int short_branch ();
+extern int small_int ();
+extern int sp64_medium_pic_operand ();
+extern int sparc_flat_eligible_for_epilogue_delay ();
+extern int sparc_flat_epilogue_delay_slots ();
+extern int sparc_issue_rate ();
+extern int sparc_operand ();
+extern int splittable_immediate_memory_operand ();
+extern int splittable_symbolic_memory_operand ();
+extern int supersparc_adjust_cost ();
+extern int symbolic_memory_operand ();
+extern int symbolic_operand ();
+extern int text_segment_operand ();
+extern int ultrasparc_adjust_cost ();
+extern int uns_small_int ();
+extern int v8plus_regcmp_op ();
+extern int v8plus_regcmp_p ();
+extern int v9_regcmp_op ();
+extern int v9_regcmp_p ();
+
+extern unsigned long sparc_flat_compute_frame_size ();
+extern unsigned long sparc_type_code ();
+
+extern char *sparc_v8plus_shift ();
+
+#ifdef __STDC__
+/* Function used for V8+ code generation. Returns 1 if the high
+ 32 bits of REG are 0 before INSN. */
+extern int sparc_check_64 (struct rtx_def *, struct rtx_def *);
+extern int sparc_return_peephole_ok (struct rtx_def *, struct rtx_def *);
+extern int compute_frame_size (int, int);
+#endif
+
+/* Defined in flags.h, but insn-emit.c does not include flags.h. */
+
+extern int flag_pic;
diff --git a/contrib/gcc/config/sparc/sparc.md b/contrib/gcc/config/sparc/sparc.md
new file mode 100644
index 0000000..ccfde05
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.md
@@ -0,0 +1,6684 @@
+;;- Machine description for SPARC chip for GNU C compiler
+;; Copyright (C) 1987, 88, 89, 92-96, 1997 Free Software Foundation, Inc.
+;; Contributed by Michael Tiemann (tiemann@cygnus.com)
+;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+;; at Cygnus Support.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
+;; a second register class, EXTRA_FP_REGS, exists for the v9 chip. The name
+;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
+;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of
+;; 'f' for all DF/TFmode values, including those that are specific to the v8.
+;;
+;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to
+;; test TARGET_LIVE_G0 if we have TARGET_ARCH64.
+
+;; Attribute for cpu type.
+;; These must match the values for enum processor_type in sparc.h.
+(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v9,ultrasparc"
+ (const (symbol_ref "sparc_cpu_attr")))
+
+;; Attribute for the instruction set.
+;; At present we only need to distinguish v9/!v9, but for clarity we
+;; test TARGET_V8 too.
+(define_attr "isa" "v6,v8,v9,sparclet"
+ (const
+ (cond [(symbol_ref "TARGET_V9") (const_string "v9")
+ (symbol_ref "TARGET_V8") (const_string "v8")
+ (symbol_ref "TARGET_SPARCLET") (const_string "sparclet")]
+ (const_string "v6"))))
+
+;; Architecture size.
+(define_attr "arch" "arch32bit,arch64bit"
+ (const
+ (cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")]
+ (const_string "arch32bit"))))
+
+;; Whether -mlive-g0 is in effect.
+(define_attr "live_g0" "no,yes"
+ (const
+ (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")]
+ (const_string "no"))))
+
+;; Insn type. Used to default other attribute values.
+
+;; type "unary" insns have one input operand (1) and one output operand (0)
+;; type "binary" insns have two input operands (1,2) and one output (0)
+;; type "compare" insns have one or two input operands (0,1) and no output
+;; type "call_no_delay_slot" is a call followed by an unimp instruction.
+
+(define_attr "type"
+ "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrt,cmove,multi,misc"
+ (const_string "binary"))
+
+;; Set true if insn uses call-clobbered intermediate register.
+(define_attr "use_clobbered" "false,true"
+ (if_then_else (and (eq_attr "type" "address")
+ (match_operand 0 "clobbered_register" ""))
+ (const_string "true")
+ (const_string "false")))
+
+;; Length (in # of insns).
+(define_attr "length" ""
+ (cond [(eq_attr "type" "load,sload,fpload")
+ (if_then_else (match_operand 1 "symbolic_memory_operand" "")
+ (const_int 2) (const_int 1))
+
+ (eq_attr "type" "store,fpstore")
+ (if_then_else (match_operand 0 "symbolic_memory_operand" "")
+ (const_int 2) (const_int 1))
+
+ (eq_attr "type" "address") (const_int 2)
+
+ (eq_attr "type" "binary")
+ (if_then_else (ior (match_operand 2 "arith_operand" "")
+ (match_operand 2 "arith_double_operand" ""))
+ (const_int 1) (const_int 3))
+
+ (eq_attr "type" "multi") (const_int 2)
+
+ (eq_attr "type" "move,unary")
+ (if_then_else (ior (match_operand 1 "arith_operand" "")
+ (match_operand 1 "arith_double_operand" ""))
+ (const_int 1) (const_int 2))]
+
+ (const_int 1)))
+
+(define_asm_attributes
+ [(set_attr "length" "1")
+ (set_attr "type" "multi")])
+
+;; Attributes for instruction and branch scheduling
+
+(define_attr "in_call_delay" "false,true"
+ (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,return,multi")
+ (const_string "false")
+ (eq_attr "type" "load,fpload,store,fpstore")
+ (if_then_else (eq_attr "length" "1")
+ (const_string "true")
+ (const_string "false"))
+ (eq_attr "type" "address")
+ (if_then_else (eq_attr "use_clobbered" "false")
+ (const_string "true")
+ (const_string "false"))]
+ (if_then_else (eq_attr "length" "1")
+ (const_string "true")
+ (const_string "false"))))
+
+(define_delay (eq_attr "type" "call")
+ [(eq_attr "in_call_delay" "true") (nil) (nil)])
+
+(define_attr "leaf_function" "false,true"
+ (const (symbol_ref "leaf_function")))
+
+
+(define_attr "in_return_delay" "false,true"
+ (if_then_else (and (and (and (eq_attr "type" "move,load,sload,store,binary,ialu")
+ (eq_attr "length" "1"))
+ (eq_attr "leaf_function" "false"))
+ (match_insn "eligible_for_return_delay"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_delay (and (eq_attr "type" "return")
+ (eq_attr "isa" "v9"))
+ [(eq_attr "in_return_delay" "true") (nil) (nil)])
+
+;; ??? Should implement the notion of predelay slots for floating point
+;; branches. This would allow us to remove the nop always inserted before
+;; a floating point branch.
+
+;; ??? It is OK for fill_simple_delay_slots to put load/store instructions
+;; in a delay slot, but it is not OK for fill_eager_delay_slots to do so.
+;; This is because doing so will add several pipeline stalls to the path
+;; that the load/store did not come from. Unfortunately, there is no way
+;; to prevent fill_eager_delay_slots from using load/store without completely
+;; disabling them. For the SPEC benchmark set, this is a serious loss,
+;; because it prevents us from moving back the final store of inner loops.
+
+(define_attr "in_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_attr "in_uncond_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_attr "in_annul_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_delay (eq_attr "type" "branch")
+ [(eq_attr "in_branch_delay" "true")
+ (nil) (eq_attr "in_annul_branch_delay" "true")])
+
+(define_delay (eq_attr "type" "uncond_branch")
+ [(eq_attr "in_uncond_branch_delay" "true")
+ (nil) (nil)])
+
+;; Function units of the SPARC
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+
+;; The integer ALU.
+;; (Noted only for documentation; units that take one cycle do not need to
+;; be specified.)
+
+;; On the sparclite, integer multiply takes 1, 3, or 5 cycles depending on
+;; the inputs.
+
+;; (define_function_unit "alu" 1 0
+;; (eq_attr "type" "unary,binary,move,address") 1 0)
+
+;; ---- cypress CY7C602 scheduling:
+;; Memory with load-delay of 1 (i.e., 2 cycle load).
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "load,sload,fpload"))
+ 2 2)
+
+;; SPARC has two floating-point units: the FP ALU,
+;; and the FP MUL/DIV/SQRT unit.
+;; Instruction timings on the CY7C602 are as follows
+;; FABSs 4
+;; FADDs/d 5/5
+;; FCMPs/d 4/4
+;; FDIVs/d 23/37
+;; FMOVs 4
+;; FMULs/d 5/7
+;; FNEGs 4
+;; FSQRTs/d 34/63
+;; FSUBs/d 5/5
+;; FdTOi/s 5/5
+;; FsTOi/d 5/5
+;; FiTOs/d 9/5
+
+;; The CY7C602 can only support 2 fp insns simultaneously.
+;; More insns cause the chip to stall.
+
+(define_function_unit "fp_alu" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fp,fpmove"))
+ 5 5)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpmul"))
+ 7 7)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpdivs,fpdivd"))
+ 37 37)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpsqrt"))
+ 63 63)
+
+;; ----- The TMS390Z55 scheduling
+;; The Supersparc can issue 1 - 3 insns per cycle: up to two integer,
+;; one ld/st, one fp.
+;; Memory delivers its result in one cycle to IU, zero cycles to FP
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload"))
+ 1 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpload"))
+ 0 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
+(define_function_unit "shift" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+;; There are only two write ports to the integer register file
+;; A store also uses a write port
+
+(define_function_unit "iwport" 2 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload,store,shift,ialu"))
+ 1 1)
+
+;; Timings; throughput/latency
+;; FADD 1/3 add/sub, format conv, compar, abs, neg
+;; FMUL 1/3
+;; FDIVs 4/6
+;; FDIVd 7/9
+;; FSQRTs 6/8
+;; FSQRTd 10/12
+;; IMUL 4/4
+
+(define_function_unit "fp_alu" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fp,fpmove,fpcmp"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpmul"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivs"))
+ 6 4)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivd"))
+ 9 7)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpsqrt"))
+ 12 10)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "imul"))
+ 4 4)
+
+;; ----- sparclet tsc701 scheduling
+;; The tsc701 issues 1 insn per cycle.
+;; Results may be written back out of order.
+
+;; Loads take 2 extra cycles to complete and 4 can be buffered at a time.
+
+(define_function_unit "tsc701_load" 4 1
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "load,sload"))
+ 3 1)
+
+;; Stores take 2(?) extra cycles to complete.
+;; It is desirable to not have any memory operation in the following 2 cycles.
+;; (??? or 2 memory ops in the case of std).
+
+(define_function_unit "tsc701_store" 1 0
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "store"))
+ 3 3
+ [(eq_attr "type" "load,sload,store")])
+
+;; The multiply unit has a latency of 5.
+(define_function_unit "tsc701_mul" 1 0
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "imul"))
+ 5 5)
+
+;; ----- The UltraSPARC-1 scheduling
+;; UltraSPARC has two integer units. Shift instructions can only execute
+;; on IE0. Condition code setting instructions, call, and jmpl (including
+;; the ret and retl pseudo-instructions) can only execute on IE1.
+;; Branch on register uses IE1, but branch on condition code does not.
+;; Conditional moves take 2 cycles. No other instruction can issue in the
+;; same cycle as a conditional move.
+;; Multiply and divide take many cycles during which no other instructions
+;; can issue.
+;; Memory delivers its result in two cycles (except for signed loads,
+;; which take one cycle more). One memory instruction can be issued per
+;; cycle.
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "load,fpload"))
+ 2 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "sload"))
+ 3 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
+(define_function_unit "ieu" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "ialu,binary,shift,compare,cmove,call"))
+ 1 1)
+
+(define_function_unit "ieu_shift" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+(define_function_unit "ieu_shift" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "cmove"))
+ 2 1)
+
+;; Timings; throughput/latency
+;; FMOV 1/1 fmov, fabs, fneg
+;; FMOVcc 1/2
+;; FADD 1/4 add/sub, format conv, compar
+;; FMUL 1/4
+;; FDIVs 12/12
+;; FDIVd 22/22
+;; FSQRTs 12/12
+;; FSQRTd 22/22
+;; FCMP takes 1 cycle to branch, 2 cycles to conditional move.
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmove"))
+ 1 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmove"))
+ 2 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fp"))
+ 4 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmp"))
+ 2 1)
+
+(define_function_unit "fmul" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmul"))
+ 4 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivs"))
+ 12 12)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivd"))
+ 22 22)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpsqrt"))
+ 12 12)
+
+;; Compare instructions.
+;; This controls RTL generation and register allocation.
+
+;; We generate RTL for comparisons and branches by having the cmpxx
+;; patterns store away the operands. Then, the scc and bcc patterns
+;; emit RTL for both the compare and the branch.
+;;
+;; We do this because we want to generate different code for an sne and
+;; seq insn. In those cases, if the second operand of the compare is not
+;; const0_rtx, we want to compute the xor of the two operands and test
+;; it against zero.
+;;
+;; We start with the DEFINE_EXPANDs, then the DEFINE_INSNs to match
+;; the patterns. Finally, we have the DEFINE_SPLITs for some of the scc
+;; insns that actually require more than one machine instruction.
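+;;
+;; As a worked illustration (hypothetical registers and label), a source test
+;; like "if (a != b)" first hits the cmpsi expander, which merely records the
+;; operands in sparc_compare_op0/op1; the bne expander then materializes the
+;; real compare via gen_compare_reg and the branch pattern, giving roughly:
+;;   cmp  %o0,%o1    ! *cmpsi_insn
+;;   bne  .LL1       ! *normal_branch
+;;    nop            ! delay slot (shown unfilled)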
+
+;; Put cmpsi first among compare insns so it matches two CONST_INT operands.
+
+(define_expand "cmpsi"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "arith_operand" "")))]
+ ""
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdi"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "arith_double_operand" "")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmptf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:TF 0 "register_operand" "")
+ (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; Now the compare DEFINE_INSNs.
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "cmp %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpdi_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "cmp %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn "cmpdi_v8plus"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "r,r,r")
+ (match_operand:DI 1 "arith_double_operand" "J,I,r")))
+ (clobber (match_scratch:SI 2 "=&h,&h,&h"))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*
+{
+  /* The srl can be omitted if the value in %L0 or %L1 is already
+     zero extended. */
+
+ output_asm_insn (\"sllx %H0,32,%2\", operands);
+
+ if (sparc_check_64 (operands[0], insn) <= 0)
+ output_asm_insn (\"srl %L0,0,%L0\", operands);
+
+ switch (which_alternative)
+ {
+ case 0:
+ return \"orcc %L0,%2,%%g0\";
+ case 1:
+ return \"or %L0,%2,%2\;cmp %2,%1\";
+ case 2:
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn (\"srl %L1,0,%L1\", operands);
+ return \"sllx %H1,32,%3\;or %L0,%2,%2\;or %L1,%3,%3\;cmp %2,%3\";
+ default:
+ abort();
+ }
+}"
+ [(set_attr "length" "3,4,7")])
+
+(define_insn "*cmpsf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpes %0,%1,%2\";
+ return \"fcmpes %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmped %0,%1,%2\";
+ return \"fcmped %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmptf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpeq %0,%1,%2\";
+ return \"fcmpeq %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpsf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmps %0,%1,%2\";
+ return \"fcmps %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpd %0,%1,%2\";
+ return \"fcmpd %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmptf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpq %0,%1,%2\";
+ return \"fcmpq %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+;; Next come the scc insns. For seq, sne, sgeu, and sltu, we can do this
+;; without jumps using the addx/subx instructions. For seq/sne on v9 we use
+;; the same code as v8 (the addx/subx method has more applications). The
+;; exception to this is "reg != 0" which can be done in one instruction on v9
+;; (so we do it). For the rest, on v9 we use conditional moves; on v8, we do
+;; branches.
+
+;; Seq_special[_xxx] and sne_special[_xxx] clobber the CC reg, because they
+;; generate addcc/subcc instructions.
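+;; For instance (hypothetical registers), "r = (a == b)" goes through
+;; seqsi_special and *seqsi_zero and comes out roughly as:
+;;   xor   %o0,%o1,%g2    ! a ^ b
+;;   subcc %g0,%g2,%g0    ! carry set iff the xor result is nonzero
+;;   subx  %g0,-1,%o0     ! 0 - (-1) - carry => 1 if equal, 0 otherwise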
+
+(define_expand "seqsi_special"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "! TARGET_LIVE_G0"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "seqdi_special"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (eq:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "snesi_special"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "! TARGET_LIVE_G0"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "snedi_special"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ne:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "seqdi_special_trunc"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (eq:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "snedi_special_trunc"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ne:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "seqsi_special_extend"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (eq:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "snesi_special_extend"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (ne:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
+;; However, the code handles both SImode and DImode.
+(define_expand "seq"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == SImode)
+ {
+ rtx pat;
+
+ if (GET_MODE (operands[0]) == SImode)
+ pat = gen_seqsi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else if (! TARGET_ARCH64)
+ FAIL;
+ else
+ pat = gen_seqsi_special_extend (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == DImode)
+ {
+ rtx pat;
+
+ if (! TARGET_ARCH64)
+ FAIL;
+ else if (GET_MODE (operands[0]) == SImode)
+ pat = gen_seqdi_special_trunc (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else
+ pat = gen_seqdi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (EQ, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
+;; However, the code handles both SImode and DImode.
+(define_expand "sne"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == SImode)
+ {
+ rtx pat;
+
+ if (GET_MODE (operands[0]) == SImode)
+ pat = gen_snesi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else if (! TARGET_ARCH64)
+ FAIL;
+ else
+ pat = gen_snesi_special_extend (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == DImode)
+ {
+ rtx pat;
+
+ if (! TARGET_ARCH64)
+ FAIL;
+ else if (GET_MODE (operands[0]) == SImode)
+ pat = gen_snedi_special_trunc (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else
+ pat = gen_snedi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (NE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (GT, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (LT, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (GE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (le:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (LE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (! TARGET_V9)
+ {
+ rtx tem;
+
+ /* We can do ltu easily, so if both operands are registers, swap them and
+ do a LTU. */
+ if ((GET_CODE (sparc_compare_op0) == REG
+ || GET_CODE (sparc_compare_op0) == SUBREG)
+ && (GET_CODE (sparc_compare_op1) == REG
+ || GET_CODE (sparc_compare_op1) == SUBREG))
+ {
+ tem = sparc_compare_op0;
+ sparc_compare_op0 = sparc_compare_op1;
+ sparc_compare_op1 = tem;
+ emit_insn (gen_sltu (operands[0]));
+ DONE;
+ }
+ }
+ else
+ {
+ if (gen_v9_scc (GTU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (TARGET_V9)
+ {
+ if (gen_v9_scc (LTU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (TARGET_V9)
+ {
+ if (gen_v9_scc (GEU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (! TARGET_V9)
+ {
+ rtx tem;
+
+ /* We can do geu easily, so if both operands are registers, swap them and
+ do a GEU. */
+ if ((GET_CODE (sparc_compare_op0) == REG
+ || GET_CODE (sparc_compare_op0) == SUBREG)
+ && (GET_CODE (sparc_compare_op1) == REG
+ || GET_CODE (sparc_compare_op1) == SUBREG))
+ {
+ tem = sparc_compare_op0;
+ sparc_compare_op0 = sparc_compare_op1;
+ sparc_compare_op1 = tem;
+ emit_insn (gen_sgeu (operands[0]));
+ DONE;
+ }
+ }
+ else
+ {
+ if (gen_v9_scc (LEU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Now the DEFINE_INSNs for the scc cases.
+
+;; The SEQ and SNE patterns are special because they can be done
+;; without any branching and do not involve a COMPARE.
+
+(define_insn "*snesi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_snesi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*snesi_zero_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*snedi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_snedi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (neg:DI (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,-1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*snedi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*seqsi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_seqsi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*seqsi_zero_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*seqdi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_seqdi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,-1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*seqdi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+;; We can also do (x + (i == 0)) and related, so put them in.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
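+;; E.g. (hypothetical registers) "r = x + (i != 0)" matches *x_plus_i_ne_0
+;; below and becomes:
+;;   subcc %g0,%o1,%g0    ! carry set iff i is nonzero
+;;   addx  %o2,0,%o0      ! r = x + carry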
+
+(define_insn "*x_plus_i_ne_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %2,0,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_minus_i_ne_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %2,0,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_plus_i_eq_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %2,-1,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_minus_i_eq_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %2,-1,%0"
+ [(set_attr "length" "2")])
+
+;; We can also do GEU and LTU directly, but these operate after a compare.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
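+;; E.g. (hypothetical registers) "r = (unsigned) a < (unsigned) b" is just a
+;; cmpsi followed by *sltu_insn:
+;;   cmp  %o0,%o1         ! subcc; carry = (a <u b)
+;;   addx %g0,0,%o2       ! r = carry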
+
+(define_insn "*sltu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ltu:SI (reg:CC 100) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,0,%0"
+ [(set_attr "type" "misc")])
+
+(define_insn "*neg_sltu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,0,%0"
+ [(set_attr "type" "misc")])
+
+;; ??? Combine should canonicalize these next two to the same pattern.
+(define_insn "*neg_sltu_minus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*neg_sltu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "arith_operand" "rI"))))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*sgeu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (geu:SI (reg:CC 100) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,-1,%0"
+ [(set_attr "type" "misc")])
+
+(define_insn "*neg_sgeu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,-1,%0"
+ [(set_attr "type" "misc")])
+
+;; We can also do (x + ((unsigned) i >= 0)) and related, so put them in.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
+
+(define_insn "*sltu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*sltu_plus_x_plus_y"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ ""
+ "addx %1,%2,%0")
+
+(define_insn "*x_minus_sltu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "subx %1,0,%0"
+ [(set_attr "type" "unary")])
+
+;; ??? Combine should canonicalize these next two to the same pattern.
+(define_insn "*x_minus_y_minus_sltu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "subx %1,%2,%0")
+
+(define_insn "*x_minus_sltu_plus_y"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ ""
+ "subx %1,%2,%0")
+
+(define_insn "*sgeu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (geu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "subx %1,-1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*x_minus_sgeu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (geu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "addx %1,-1,%0"
+ [(set_attr "type" "unary")])
+
+;; Now we have the generic scc insns.
+;; !v9: These will be done using a jump.
+;; v9: Use conditional moves which are defined elsewhere.
+;; We have to exclude the cases above, since we will not want combine to
+;; turn something that does not require a jump into something that does.
+
+(define_insn "*scc_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ ""
+ "* return output_scc_insn (operands, insn); "
+ [(set_attr "type" "multi")
+ (set_attr "length" "3")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ ;; 32 bit LTU/GEU are better implemented using addx/subx
+ "TARGET_V9 && REGNO (operands[1]) == SPARC_ICC_REG
+ && (GET_MODE (operands[1]) == CCXmode
+ || (GET_CODE (operands[2]) != LTU && GET_CODE (operands[2]) != GEU))"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup:SI 2 [(match_dup 1) (const_int 0)])
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
+(define_insn "*scc_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operator:DI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ "TARGET_ARCH64"
+ "* return output_scc_insn (operands, insn); "
+ [(set_attr "type" "multi")
+ (set_attr "length" "3")])
+
+;; These control RTL generation for conditional jump insns
+
+;; The quad-word fp compare library routines all return nonzero to indicate
+;; true, which is different from the equivalent libgcc routines, so we must
+;; handle them specially here.
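+;; Schematically (register numbers and routine name omitted; this is only an
+;; illustration of the mechanism), a TFmode "a > b" branch without
+;; TARGET_HARD_QUAD becomes a library call whose nonzero result is then
+;; tested like any integer:
+;;   call  <quad-float gt routine>   ! emitted by emit_float_lib_cmp
+;;    nop
+;;   cmp   %o0,0                     ! result of the call
+;;   bne   .LL1                      ! gen_bne on the (nonzero == true) result
+;;    nop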
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (EQ, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (NE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (GT, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (LT, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (GE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (LE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Now match both normal and inverted jump.
+
+(define_insn "*normal_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "noov_compare_op"
+ [(reg 100) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[0], 1, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "noov_compare_op"
+ [(reg 100) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[0], 1, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*normal_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*normal_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+;; Sparc V9-specific jump insns. None of these are guaranteed to be
+;; in the architecture.
+
+;; There are no 32 bit brreg insns.
+
+(define_insn "*normal_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_regcmp_op"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_ARCH64"
+ "*
+{
+ return output_v9branch (operands[0], 1, 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_regcmp_op"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_ARCH64"
+ "*
+{
+ return output_v9branch (operands[0], 1, 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence);
+}"
+ [(set_attr "type" "branch")])
+
+;; Esoteric move insns (lo_sum, high, pic).
+
+(define_insn "*lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ ;; V9 needs "add" because of the code models. We still use "or" for v8
+ ;; so we can compare the old compiler with the new.
+ "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
+;; confuse them with real addresses.
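+;; Illustration (not a pattern): a PIC low-part reference to a symbol "foo"
+;; therefore looks like
+;;   (lo_sum:SI (reg:SI n) (unspec:SI [(symbol_ref:SI "foo")] 0))
+;; rather than a bare (lo_sum ... (symbol_ref "foo")).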
+(define_insn "pic_lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
+ "flag_pic"
+ ;; V9 needs "add" because of the code models. We still use "or" for v8
+ ;; so we can compare the old compiler with the new.
+ "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; The PIC version of sethi must appear before the non-pic case so that
+;; the unspec will not be matched as part of the operand.
+;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
+;; confuse them with real addresses.
+(define_insn "pic_sethi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
+ "flag_pic && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "pic_lo_sum_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:DI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
+ "TARGET_ARCH64 && flag_pic"
+ "add %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "pic_sethi_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
+ "TARGET_ARCH64 && flag_pic && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "get_pc"
+ [(clobber (reg:SI 15))
+ (set (match_operand 0 "register_operand" "=r")
+ (unspec [(match_operand 1 "" "") (match_operand 2 "" "")] 2))]
+ "flag_pic && REGNO (operands[0]) == 23"
+ "sethi %%hi(%a1-4),%0\;call %a2\;add %0,%%lo(%a1+4),%0"
+ [(set_attr "length" "3")])
+
+(define_insn "get_pc_via_rdpc"
+ [(set (match_operand 0 "register_operand" "=r") (pc))]
+ "TARGET_V9"
+ "rd %%pc,%0"
+ [(set_attr "type" "move")])
+
+(define_insn "*sethi_hi"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (high:HI (match_operand 1 "" "")))]
+ "check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+;; This must appear after the PIC sethi so that the PIC unspec will not
+;; be matched as part of the operand.
+(define_insn "*sethi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "" "")))]
+ "check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*lo_sum_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "immediate_operand" "in")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ /* Don't output a 64 bit constant, since we can't trust the assembler to
+ handle it correctly. */
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && HOST_BITS_PER_WIDE_INT > 32
+ && INTVAL (operands[2]) > 0xffffffff)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
+
+ return \"or %L1,%%lo(%a2),%L0\";
+}"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; ??? Optimizer does not handle "or %o1,%lo(0),%o1". How about add?
+
+(define_insn "*lo_sum_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "immediate_operand" "in")))]
+ "TARGET_ARCH64"
+ "*
+{
+ /* Don't output a 64 bit constant, since we can't trust the assembler to
+ handle it correctly. */
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && HOST_BITS_PER_WIDE_INT > 32
+ && INTVAL (operands[2]) > 0xffffffff)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
+
+ /* Note that we use add here. This is important because Medium/Anywhere
+ code model support depends on it. */
+ return \"add %1,%%lo(%a2),%0\";
+}"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+(define_insn "*sethi_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "" "")))]
+ "! TARGET_ARCH64 && check_pic (1)"
+ "*
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ if (INTVAL (op1) < 0)
+ return \"mov -1,%0\";
+ else
+ return \"mov 0,%0\";
+ }
+ else if (GET_CODE (op1) == CONST_DOUBLE)
+ {
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
+ return singlemove_string (operands);
+ }
+ else
+ abort ();
+ return \"\";
+}"
+ [(set_attr "type" "move")
+ (set_attr "length" "2")])
+
+;;; ??? This pattern originally clobbered a scratch register. However, that
+;;; is invalid: the movdi pattern may not use a temp register because it
+;;; may be called from reload to reload a DImode value. In that case, we
+;;; end up with a scratch register that never gets allocated. To avoid this,
+;;; we use global register 1, which is never otherwise used by gcc as a temp.
+;;; The correct solution here might be to force DImode constants to memory,
+;;; e.g. by using a toc like the romp and rs6000 ports do for addresses; reg
+;;; 1 would then no longer need to be considered a fixed reg.
+
+(define_expand "sethi_di_sp64"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (high:DI (match_operand 1 "general_operand" "")))
+ (clobber (reg:DI 1))])]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*sethi_di_sp64_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "const_double_operand" "")))
+ (clobber (reg:DI 1))]
+ "TARGET_ARCH64 && check_pic (1)"
+ "*
+{
+#if HOST_BITS_PER_WIDE_INT == 32
+ rtx high, low;
+
+ split_double (operands[1], &high, &low);
+
+ if (high == const0_rtx)
+ {
+ operands[1] = low;
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ }
+ else
+ {
+ operands[1] = high;
+ output_asm_insn (singlemove_string (operands), operands);
+
+ operands[1] = low;
+ output_asm_insn (\"sllx %0,32,%0\", operands);
+ if (low != const0_rtx)
+ output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
+ }
+#else
+ rtx op = operands[1];
+
+ if (! SPARC_SETHI_P (INTVAL(op)))
+ {
+ operands[1] = GEN_INT (INTVAL (op) >> 32);
+ output_asm_insn (singlemove_string (operands), operands);
+
+ output_asm_insn (\"sllx %0,32,%0\", operands);
+ if (INTVAL (op) & 0xffffffff)
+ {
+ operands[1] = GEN_INT (INTVAL (op) & 0xffffffff);
+ output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
+ }
+ }
+ else
+ {
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ }
+#endif
+
+ return \"\";
+}"
+ [(set_attr "type" "move")
+ (set_attr "length" "5")])
+
+;; Most of the required support for the various code models is here.
+;; We can do this because sparcs need the high insn to load the address. We
+;; just need to get high to do the right thing for each code model. Then each
+;; uses the same "%X+%lo(...)" in the load/store insn, though in the case of
+;; the medium/middle code model "%lo" is written "%l44".
+
+;; When TARGET_CM_MEDLOW, assume that the upper 32 bits of symbol addresses are
+;; always 0.
+;; When TARGET_CM_MEDMID, the executable must be in the low 16 TB of memory.
+;; This corresponds to the low 44 bits, and the %[hml]44 relocs are used.
+;; ??? Not implemented yet.
+;; When TARGET_CM_EMBMEDANY, the text and data segments have a maximum size of
+;; 31 bits and may be located anywhere. EMBMEDANY_BASE_REG contains the start
+;; address of the data segment, currently %g4.
+;; When TARGET_CM_MEDANY, the text and data segments have a maximum size of 31
+;; bits and may be located anywhere. The maximum offset from any instruction
+;; to the label _GLOBAL_OFFSET_TABLE_ is 31 bits.
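+;;
+;; Illustration (hypothetical register, symbol "sym"): under TARGET_CM_MEDLOW
+;; taking the address of a data symbol uses the patterns below as
+;;   sethi %hi(sym),%o0         ! *sethi_di_medlow
+;;   add   %o0,%lo(sym),%o0     ! *lo_sum_di_sp64
+;; while TARGET_CM_EMBMEDANY additionally adds in the data segment base
+;; register (see *sethi_di_embmedany_data and the warning below it).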
+
+(define_insn "*sethi_di_medlow"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_MEDLOW && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*sethi_di_medium_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "sp64_medium_pic_operand" "")))]
+ "(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+;; WARNING: %0 gets %hi(%1)+%g4.
+;; You cannot OR in %lo(%1), it must be added in.
+
+(define_insn "*sethi_di_embmedany_data"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "data_segment_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi %%hi(%a1),%0; add %0,%_,%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "2")])
+
+(define_insn "*sethi_di_embmedany_text"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "text_segment_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi %%uhi(%a1),%%g1; or %%g1,%%ulo(%a1),%%g1; sllx %%g1,32,%%g1; sethi %%hi(%a1),%0; or %0,%%g1,%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "5")])
+
+;; Move instructions
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode))
+ DONE;
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
+ (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)
+ || operands[1] == const0_rtx)"
+ "@
+ mov %1,%0
+ sethi %%hi(%a1),%0
+ ldub %1,%0
+ stb %r1,%0"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "1")])
+
+(define_insn "*movqi_insn_liveg0"
+ [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
+ (match_operand:QI 1 "move_operand" "r,J,I,K,Q,r"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ sethi %%hi(%a1),%0
+ ldub %1,%0
+ stb %1,%0"
+ [(set_attr "type" "move,move,move,move,load,store")
+ (set_attr "length" "1,1,2,1,1,1")])
+
+(define_insn "*lo_sum_qi"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (subreg:QI (lo_sum:SI (match_operand:QI 1 "register_operand" "r")
+ (match_operand 2 "immediate_operand" "in")) 0))]
+ ""
+ "or %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "*store_qi"
+ [(set (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:QI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode))
+ DONE;
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
+ (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)
+ || operands[1] == const0_rtx)"
+ "@
+ mov %1,%0
+ sethi %%hi(%a1),%0
+ lduh %1,%0
+ sth %r1,%0"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "1")])
+
+(define_insn "*movhi_insn_liveg0"
+ [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
+ (match_operand:HI 1 "move_operand" "r,J,I,K,Q,r"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ sethi %%hi(%a1),%0
+ lduh %1,%0
+ sth %1,%0"
+ [(set_attr "type" "move,move,move,move,load,store")
+ (set_attr "length" "1,1,2,1,1,1")])
+
+(define_insn "*lo_sum_hi"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (lo_sum:HI (match_operand:HI 1 "register_operand" "r")
+ (match_operand 2 "immediate_operand" "in")))]
+ ""
+ "or %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "*store_hi"
+ [(set (mem:HI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:HI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode))
+ DONE;
+}")
+
+;; We must support both 'r' and 'f' registers here, because combine may
+;; convert SFmode hard registers to SImode hard registers when simplifying
+;; subreg sets.
+
+;; We cannot combine the similar 'r' and 'f' constraints, because it causes
+;; problems with register allocation. Reload might try to put an integer
+;; in an fp register, or an fp number in an integer register.
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q,d")
+ (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f,J"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)
+ || operands[1] == const0_rtx)
+ && (GET_CODE (operands[0]) != REG || ! CONSTANT_P (operands[1])
+ || REGNO (operands[0]) < 32
+ || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)"
+ "@
+ mov %1,%0
+ fmovs %1,%0
+ sethi %%hi(%a1),%0
+ ld %1,%0
+ ld %1,%0
+ st %r1,%0
+ st %1,%0
+ fzeros %0"
+ [(set_attr "type" "move,fpmove,move,load,fpload,store,fpstore,fpmove")
+ (set_attr "length" "1")])
+
+(define_insn "*movsi_insn_liveg0"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,f,r,r,f,Q,Q")
+ (match_operand:SI 1 "move_operand" "r,J,I,!f,K,Q,!Q,r,!f"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ fmovs %1,%0
+ sethi %%hi(%a1),%0
+ ld %1,%0
+ ld %1,%0
+ st %1,%0
+ st %1,%0"
+ [(set_attr "type" "move,move,move,fpmove,move,load,fpload,store,fpstore")
+ (set_attr "length" "1,1,2,1,1,1,1,1,1")])
+
+(define_insn "*store_si"
+ [(set (mem:SI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:SI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DImode))
+ DONE;
+}")
+
+;; 32 bit V9 movdi is like regular 32 bit except: a 64 bit zero can be stored
+;; to aligned memory with a single instruction, the ldd/std instructions
+;; are not used, and constants can not be moved to floating point registers.
+
+(define_insn "*movdi_sp32_v9"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,Q,r,r,?e,?e,?Q,?b")
+ (match_operand:DI 1 "general_operand" "r,J,r,Q,i,e,Q,e,J"))]
+ "TARGET_V9 && ! TARGET_ARCH64
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)
+ && (GET_CODE (operands[0]) != REG || ! CONSTANT_P (operands[1])
+ || REGNO (operands[0]) < 32
+ || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)"
+ "*
+{
+ if (which_alternative == 1)
+ return \"stx %%g0,%0\";
+ if (which_alternative == 8)
+ return \"fzero %0\";
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "move,store,store,load,multi,fp,fpload,fpstore,fpmove")
+ (set_attr "length" "2,1,3,3,3,2,3,3,1")])
+
+;; SPARC V9 deprecates std. Split it here.
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (match_operand:DI 1 "register_operand" "r"))]
+ "TARGET_V9 && ! TARGET_ARCH64 && reload_completed
+ && REGNO (operands[1]) < 32 && ! MEM_VOLATILE_P (operands[0])
+ && offsettable_memref_p (operands[0])"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "operands[3] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[1]);
+ operands[4] = adj_offsettable_operand (operands[0], 4);
+ PUT_MODE (operands[4], SImode);
+ operands[2] = copy_rtx (operands[0]);
+ PUT_MODE (operands[2], SImode);")
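+
+;; (Illustrative: assuming the DImode value sits in the even/odd pair
+;; %o0/%o1, std %o0,[%fp-8] is split into st %o0,[%fp-8]; st %o1,[%fp-4].
+;; The most significant word goes to the lower address, SPARC being
+;; big-endian.)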
+
+;; Split register to register moves.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "arith_double_operand" "rIN"))]
+ "! TARGET_ARCH64
+ && REGNO (operands[0]) < 32
+ && GET_CODE (operands[1]) == REG && REGNO (operands[1]) < 32
+ && ! reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[1]);")
+
+(define_insn "*movdi_sp32"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,U,Q,r,r,?f,?f,?Q")
+ (match_operand:DI 1 "general_operand" "r,U,T,r,Q,i,f,Q,f"))]
+ "! TARGET_V9
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "move,store,load,store,load,multi,fp,fpload,fpstore")
+ (set_attr "length" "2,1,1,3,3,3,2,3,3")])
+
+;;; ??? The trick used below can be extended to load any negative 32 bit
+;;; constant in two instructions. Currently the compiler will use HIGH/LO_SUM
+;;; for anything not matching the HIK constraints, which results in 5
+;;; instructions. Positive 32 bit constants can be loaded in the obvious way
+;;; with sethi/ori. To extend the trick, in the xor instruction, use
+;;; xor %o0, ((op1 & 0x3ff) | -0x400), %o0
+;;; This needs the original value of operands[1], not the inverted value.
+
+(define_insn "*movdi_sp64_insn"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q,?e,?e,?Q")
+ (match_operand:DI 1 "move_operand" "rI,K,Q,rJ,e,Q,e"))]
+ "TARGET_ARCH64
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov %1,%0\";
+ case 1:
+ /* Sethi does not sign extend, so we must use a little trickery
+ to use it for negative numbers. Invert the constant before
+ loading it in, then use a xor immediate to invert the loaded bits
+ (along with the upper 32 bits) to the desired constant. This
+ works because the sethi and immediate fields overlap. */
+
+ if ((INTVAL (operands[1]) & 0x80000000) == 0)
+ return \"sethi %%hi(%a1),%0\";
+ else
+ {
+ operands[1] = GEN_INT (~INTVAL (operands[1]));
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ /* The low 10 bits are already zero, but invert the rest.
+ Assemblers don't accept 0x1c00, so use -0x400 instead. */
+ return \"xor %0,-0x400,%0\";
+ }
+ case 2:
+ return \"ldx %1,%0\";
+ case 3:
+ return \"stx %r1,%0\";
+ case 4:
+ return \"fmovd %1,%0\";
+ case 5:
+ return \"ldd %1,%0\";
+ case 6:
+ return \"std %1,%0\";
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "move,move,load,store,fp,fpload,fpstore")
+ (set_attr "length" "1,2,1,1,1,1,1")])
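+
+;; (Worked example of the sethi/xor trick above, with an illustrative
+;; constant: to load -0x12345000, the inverted value 0x12344fff is handed to
+;; sethi, which yields 0x12344c00 with the low ten bits zero, and
+;; xor %0,-0x400,%0 then produces 0xffffffffedcbb000, the sign-extended
+;; original.  The -0x400 immediate is simply the sign-extended form of the
+;; 13 bit field value 0x1c00 mentioned in the comment.)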
+
+;; ??? There's no symbolic (set (mem:DI ...) ...).
+;; Experimentation with v9 suggested one isn't needed.
+
+;; Floating point move insns
+
+;; This pattern forces (set (reg:SF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movsf pattern.
+(define_insn "*movsf_const_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,d,m,?r")
+ (match_operand:SF 1 "" "m,G,G,?F"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"ld %1,%0\";
+ case 1:
+ return \"fzeros %0\";
+ case 2:
+ return \"st %%g0,%0\";
+ case 3:
+ return singlemove_string (operands);
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "fpload,fpmove,store,load")
+ (set_attr "length" "1,1,1,2")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SFmode))
+ DONE;
+}")
+
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=f,f,Q,r,r,Q")
+ (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "f,Q,f,r,Q,r"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ fmovs %1,%0
+ ld %1,%0
+ st %1,%0
+ mov %1,%0
+ ld %1,%0
+ st %1,%0"
+ [(set_attr "type" "fpmove,fpload,fpstore,move,load,store")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -mno-fpu.
+
+(define_insn "*movsf_no_f_insn"
+ [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=r,r,Q")
+ (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "r,Q,r"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov %1,%0
+ ld %1,%0
+ st %1,%0"
+ [(set_attr "type" "move,load,store")])
+
+(define_insn "*store_sf"
+ [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i"))
+ (match_operand:SF 1 "reg_or_0_operand" "rfG"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+;; This pattern forces (set (reg:DF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movdf pattern.
+
+(define_insn "*movdf_const_insn"
+ [(set (match_operand:DF 0 "general_operand" "=?r,e,o,d")
+ (match_operand:DF 1 "" "?F,m,G,G"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_move_double (operands);
+ case 1:
+ return output_fp_move_double (operands);
+ case 2:
+ if (TARGET_ARCH64 || (TARGET_V9 && mem_aligned_8 (operands[0])))
+ {
+ return \"stx %%g0,%0\";
+ }
+ else
+ {
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ return \"st %%g0,%0\;st %%g0,%1\";
+ }
+ case 3:
+ return \"fzero %0\";
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "load,fpload,store,fpmove")
+ (set_attr "length" "3,3,3,1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DFmode))
+ DONE;
+}")
+
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=e,Q,e,T,U,r,Q,r")
+ (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "e,e,Q,U,T,r,r,Q"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "fp,fpstore,fpload,fpstore,fpload,move,store,load")
+ (set_attr "length" "2,3,3,1,1,2,2,2")])
+
+;; Exactly the same as above, except that all `e' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `e' reg
+;; when -mno-fpu.
+
+(define_insn "*movdf_no_e_insn"
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,r,Q,&r")
+ (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,r,r,Q"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "* return output_move_double (operands);"
+ [(set_attr "type" "store,load,move,store,load")
+ (set_attr "length" "1,1,2,3,3")])
+
+;; Must handle overlapping registers here, since parameters can be unaligned
+;; in registers.
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" ""))]
+ "! TARGET_ARCH64 && reload_completed
+ && REGNO (operands[0]) < SPARC_FIRST_V9_FP_REG
+ && REGNO (operands[1]) < SPARC_FIRST_V9_FP_REG"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{
+ rtx first_set = operand_subword (operands[0], 0, 0, DFmode);
+ rtx second_use = operand_subword (operands[1], 1, 0, DFmode);
+
+ if (REGNO (first_set) == REGNO (second_use))
+ {
+ operands[2] = operand_subword (operands[0], 1, 0, DFmode);
+ operands[3] = second_use;
+ operands[4] = first_set;
+ operands[5] = operand_subword (operands[1], 0, 0, DFmode);
+ }
+ else
+ {
+ operands[2] = first_set;
+ operands[3] = operand_subword (operands[1], 0, 0, DFmode);
+ operands[4] = operand_subword (operands[0], 1, 0, DFmode);
+ operands[5] = second_use;
+ }
+}")
+
+(define_insn "*store_df"
+ [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i"))
+ (match_operand:DF 1 "reg_or_0_operand" "re,G"))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "*
+{
+ output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
+ if (which_alternative == 0)
+ return \"std %1,[%2+%%lo(%a0)]\";
+ else
+ return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\";
+}"
+ [(set_attr "type" "store")
+ (set_attr "length" "3")])
+
+;; This pattern forces (set (reg:TF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movtf pattern.
+(define_insn "*movtf_const_insn"
+ [(set (match_operand:TF 0 "general_operand" "=?r,e,o")
+ (match_operand:TF 1 "" "?F,m,G"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_move_quad (operands);
+ case 1:
+ return output_fp_move_quad (operands);
+ case 2:
+ if (TARGET_ARCH64 || (TARGET_V9 && mem_aligned_8 (operands[0])))
+ {
+ operands[1] = adj_offsettable_operand (operands[0], 8);
+ return \"stx %%g0,%0\;stx %%g0,%1\";
+ }
+ else
+ {
+ /* ??? Do we run off the end of the array here? */
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ operands[2] = adj_offsettable_operand (operands[0], 8);
+ operands[3] = adj_offsettable_operand (operands[0], 12);
+ return \"st %%g0,%0\;st %%g0,%1\;st %%g0,%2\;st %%g0,%3\";
+ }
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "load,fpload,store")
+ (set_attr "length" "5,5,5")])
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, TFmode))
+ DONE;
+}")
+
+(define_insn "*movtf_insn"
+ [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=e,Q,e,r,Q,r")
+ (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "e,e,Q,r,r,Q"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_quad (operands);
+ return output_move_quad (operands);
+}"
+ [(set_attr "type" "fp,fpstore,fpload,move,store,load")
+ (set_attr "length" "5,4,4,5,4,4")])
+
+;; Exactly the same as above, except that all `e' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `e' reg
+;; when -mno-fpu.
+
+(define_insn "*movtf_no_e_insn"
+ [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=r,Q,&r")
+ (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "r,r,Q"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_quad (operands);
+ return output_move_quad (operands);
+}"
+ [(set_attr "type" "move,store,load")
+ (set_attr "length" "4,5,5")])
+
+;; This is disabled because it does not work. Long doubles have only 8
+;; byte alignment. Adding an offset of 8 or 12 to an 8 byte aligned %lo may
+;; cause it to overflow. See also GO_IF_LEGITIMATE_ADDRESS.
+(define_insn "*store_tf"
+ [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i"))
+ (match_operand:TF 1 "reg_or_0_operand" "re,G"))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "0 && (reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "*
+{
+ output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
+ if (which_alternative == 0)
+ return \"std %1,[%2+%%lo(%a0)]\;std %S1,[%2+%%lo(%a0+8)]\";
+ else
+ return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\; st %%g0,[%2+%%lo(%a0+8)]\;st %%g0,[%2+%%lo(%a0+12)]\";
+}"
+ [(set_attr "type" "store")
+ (set_attr "length" "5")])
+
+;; SPARC V9 conditional move instructions.
+
+;; We can handle larger constants here for some flavors, but for now we keep
+;; it simple and only allow those constants supported by all flavors.
+;; Note that emit_conditional_move canonicalizes operands 2,3 so that operand
+;; 3 contains the constant if one is present, but we handle either for
+;; generality (sparc.c puts a constant in operand 2).
+
+(define_expand "movqicc"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (if_then_else:QI (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "arith10_operand" "")
+ (match_operand:QI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movhicc"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (if_then_else:HI (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "arith10_operand" "")
+ (match_operand:HI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arith10_operand" "")
+ (match_operand:SI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ enum machine_mode op0_mode = GET_MODE (sparc_compare_op0);
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && ((TARGET_ARCH64 && op0_mode == DImode && v9_regcmp_p (code))
+ || (op0_mode == SImode && v8plus_regcmp_p (code))))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, op0_mode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "arith10_double_operand" "")
+ (match_operand:DI 3 "arith10_double_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movtfcc"
+ [(set (match_operand:TF 0 "register_operand" "")
+ (if_then_else:TF (match_operand 1 "comparison_operator" "")
+ (match_operand:TF 2 "register_operand" "")
+ (match_operand:TF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+;; Conditional move define_insns.
+
+(define_insn "*movqi_cc_sp64"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:QI 3 "arith11_operand" "rL,0")
+ (match_operand:QI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
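+
+;; (Illustrative note on the alternatives above: %C1 prints the condition and
+;; %c1 its inverse, so when operand 4 is tied to the destination the move of
+;; operand 3 happens on the condition itself, and when operand 3 is tied the
+;; inverted condition moves operand 4 instead.)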
+
+(define_insn "*movhi_cc_sp64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:HI 3 "arith11_operand" "rL,0")
+ (match_operand:HI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsi_cc_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:SI 3 "arith11_operand" "rL,0")
+ (match_operand:SI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+;; ??? The constraints of operands 3,4 need work.
+(define_insn "*movdi_cc_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:DI 3 "arith11_double_operand" "rLH,0")
+ (match_operand:DI 4 "arith11_double_operand" "0,rLH")))]
+ "TARGET_ARCH64"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsf_cc_sp64"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
+ "TARGET_V9 && TARGET_FPU"
+ "@
+ fmovs%C1 %x2,%3,%0
+ fmovs%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movdf_cc_sp64"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "@
+ fmovd%C1 %x2,%3,%0
+ fmovd%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movtf_cc_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "@
+ fmovq%C1 %x2,%3,%0
+ fmovq%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movqi_cc_reg_sp64"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:QI 3 "arith10_operand" "rM,0")
+ (match_operand:QI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movhi_cc_reg_sp64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:HI 3 "arith10_operand" "rM,0")
+ (match_operand:HI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsi_cc_reg_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+;; On UltraSPARC this is slightly worse than cmp/mov %icc if the register
+;; needs to be zero extended, but better on average.
+(define_insn "*movsi_cc_reg_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "TARGET_V9"
+ "*
+{
+ if (! sparc_check_64 (operands[2], insn))
+ output_asm_insn (\"srl %2,0,%2\", operands);
+ if (which_alternative == 0)
+ return \"movr%D1 %2,%r3,%0\";
+ return \"movr%d1 %2,%r4,%0\";
+}"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+;; To work well this needs to know the current insn, but that is not an
+;; argument to gen_split_*.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (unspec:SI [(match_dup 1) (match_dup 3) (match_dup 4)] 9))]
+ "if (! sparc_check_64 (operands[2], NULL_RTX))
+ emit_insn (gen_v8plus_clear_high (operands[2], operands[2]));")
+
+;; A conditional move with the condition argument known to be zero extended
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")] 9))]
+ "TARGET_V9"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+;; ??? The constraints of operands 3,4 need work.
+(define_insn "*movdi_cc_reg_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:DI 3 "arith10_double_operand" "rMH,0")
+ (match_operand:DI 4 "arith10_double_operand" "0,rMH")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsf_cc_reg_sp64"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrs%D1 %2,%3,%0
+ fmovrs%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movdf_cc_reg_sp64"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (if_then_else:DF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrd%D1 %2,%3,%0
+ fmovrd%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movtf_cc_reg_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrq%D1 %2,%3,%0
+ fmovrq%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+;;- zero extension instructions
+
+;; These patterns originally accepted general_operands; however, slightly
+;; better code is generated by accepting only register_operands and then
+;; letting combine generate the ldu[hb] insns.
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = GEN_INT (16);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_16));
+ emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
+ DONE;
+}")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "lduh %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*zero_extendqihi2_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*zero_extendqidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_48 = GEN_INT (48);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_48));
+ emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
+ DONE;
+}")
+
+(define_insn "*zero_extendhidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "lduh %1,%0"
+ [(set_attr "type" "load")])
+
+
+;; ??? Write truncdisi pattern using sra?
+
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*zero_extendsidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
+ "@
+ srl %1,0,%0
+ lduw %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+;; Zero extend a 32 bit value in a 64 bit register.
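+;; If sparc_check_64 already proves the upper 32 bits to be clear, the srl is
+;; omitted (only a nop is kept when the insn sits in a delay slot).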
+(define_insn "v8plus_clear_high"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,Q")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r,r")] 10))]
+ "TARGET_V9"
+ "*
+if (which_alternative == 1)
+ return \"st %1,%0\";
+if (sparc_check_64 (operands[1], insn) > 0)
+ return final_sequence ? \"nop\" : \"\";
+return \"srl %1,0,%0\";
+"
+ [(set_attr "type" "shift,store")])
+
+;; Simplify comparisons of extended values.
+
+(define_insn "*cmp_zero_extendqisi2"
+ [(set (reg:CC 100)
+ (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
+ (const_int 0)))]
+ ""
+ "andcc %0,0xff,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqisi2_set"
+ [(set (reg:CC 100)
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "andcc %1,0xff,%0"
+ [(set_attr "type" "unary")])
+
+;; Similarly, handle SI->QI mode truncation followed by a compare.
+
+(define_insn "*cmp_siqi_trunc"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
+ (const_int 0)))]
+ ""
+ "andcc %0,0xff,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_siqi_trunc_set"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0)
+ (const_int 0)))
+ (set (match_operand:QI 0 "register_operand" "=r")
+ (match_dup 1))]
+ ""
+ "andcc %1,0xff,%0"
+ [(set_attr "type" "unary")])
+
+;;- sign extension instructions
+
+;; These patterns originally accepted general_operands; however, slightly
+;; better code is generated by accepting only register_operands and then
+;; letting combine generate the lds[hb] insns.
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = GEN_INT (16);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_16));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
+ DONE;
+}")
+
+(define_insn "*sign_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldsh %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = GEN_INT (24);
+ int op1_subword = 0;
+ int op0_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+ if (GET_CODE (operand0) == SUBREG)
+ {
+ op0_subword = SUBREG_WORD (operand0);
+ operand0 = XEXP (operand0, 0);
+ }
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_24));
+ if (GET_MODE (operand0) != SImode)
+ operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subword);
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+}")
+
+(define_insn "*sign_extendqihi2_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = GEN_INT (24);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_24));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+}")
+
+(define_insn "*sign_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_56 = GEN_INT (56);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_56));
+ emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
+ DONE;
+}")
+
+(define_insn "*sign_extendqidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_48 = GEN_INT (48);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_48));
+ emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
+ DONE;
+}")
+
+(define_insn "*sign_extendhidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "ldsh %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*sign_extendsidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64"
+ "@
+ sra %1,0,%0
+ ldsw %1,%0"
+ [(set_attr "type" "unary,sload")
+ (set_attr "length" "1")])
+
+;; Special pattern for optimizing bit-field compares. This is needed
+;; because combine uses this as a canonical form.
+
+(define_insn "*cmp_zero_extract"
+ [(set (reg:CC 100)
+ (compare:CC
+ (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "small_int" "n")
+ (match_operand:SI 2 "small_int" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) > 19"
+ "*
+{
+ int len = INTVAL (operands[1]);
+ int pos = 32 - INTVAL (operands[2]) - len;
+ unsigned mask = ((1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc %0,%1,%%g0\";
+}")
+
+(define_insn "*cmp_zero_extract_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (match_operand:SI 1 "small_int" "n")
+ (match_operand:SI 2 "small_int" "n"))
+ (const_int 0)))]
+ "TARGET_ARCH64 && INTVAL (operands[2]) > 51"
+ "*
+{
+ int len = INTVAL (operands[1]);
+ int pos = 64 - INTVAL (operands[2]) - len;
+ unsigned HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc %0,%1,%%g0\";
+}")
+
+;; Conversions between float, double and long double.
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float_extend:DF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fstod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "extendsftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fstoq %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "extenddftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fdtoq %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fdtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float_truncate:DF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtod %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Conversion between fixed point and floating point.
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fitos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float:DF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fitod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatsitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fitoq %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Now the same for 64 bit sources.
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "fxtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float:DF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "fxtod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fxtoq %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Convert a float to an actual integer.
+;; Truncation is performed as part of the conversion.
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+ "TARGET_FPU"
+ "fstoi %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_FPU"
+ "fdtoi %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtoi %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Now the same, for V9 targets
+
+(define_insn "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+ "TARGET_V9 && TARGET_FPU"
+ "fstox %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU"
+ "fdtox %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtox %1,%0"
+ [(set_attr "type" "fp")])
+
+;;- arithmetic instructions
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*adddi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
+ output_asm_insn (\"add %H1,%2,%H0\", xoperands);
+ else
+ output_asm_insn (\"addcc %L1,%3,%L0\;addx %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"addcc %L1,%L2,%L0\;addx %H1,%H2,%H0\";
+}"
+ [(set_attr "length" "2")])
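+
+;; (Illustrative: with operand 2 a small constant such as 5 this emits
+;; addcc %L1,5,%L0; addx %H1,0,%H0, adding the low words first and folding
+;; the carry into the high words.  When the low word of the constant is zero
+;; and the destination equals the source, a single add %H1,%2,%H0 suffices.)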
+
+
+;; Split DImode arithmetic
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (plus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (plus:SI (plus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "arith_double_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (minus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (minus:SI (minus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+;; LTU here means "carry set"
+(define_insn "*addx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "addx %1,%2,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*subx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "subx %1,%2,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "addcc %L2,%1,%L0\;addx %H2,0,%H0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*adddi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "add %1,%2,%0")
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ add %1,%2,%0
+ fpadd32s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_insn "*cmp_cc_plus"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "addcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_plus"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (plus:DI (match_operand:DI 0 "arith_double_operand" "%r")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "addcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_plus_set"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "addcc %1,%2,%0")
+
+(define_insn "*cmp_ccx_plus_set"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_ARCH64"
+ "addcc %1,%2,%0")
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*subdi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
+ output_asm_insn (\"sub %H1,%2,%H0\", xoperands);
+ else
+ output_asm_insn (\"subcc %L1,%3,%L0\;subx %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"subcc %L1,%L2,%L0\;subx %H1,%H2,%H0\";
+}"
+ [(set_attr "length" "2")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "subcc %L1,%2,%L0\;addx %H1,0,%H0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*subdi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "sub %1,%2,%0")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ sub %1,%2,%0
+ fpsub32s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_insn "*cmp_minus_cc"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "subcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_minus_ccx"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (minus:DI (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "subcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_minus_cc_set"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "subcc %1,%2,%0")
+
+(define_insn "*cmp_minus_ccx_set"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_ARCH64"
+ "subcc %1,%2,%0")
+
+;; Integer Multiply/Divide.
+
+;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
+;; be used. We still use them in 32 bit v9 compilers.
+;; The 64 bit v9 compiler will (/should) widen the args and use muldi3.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_HARD_MUL"
+ "smul %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_expand "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_muldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*muldi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "mulx %1,%2,%0")
+
+;; V8plus wide multiply.
+(define_insn "muldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r,0")
+ (match_operand:DI 2 "arith_double_operand" "rHI,rHI")))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_V8PLUS"
+ "*
+{
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn (\"srl %L1,0,%L1\", operands);
+ if (which_alternative == 1)
+ output_asm_insn (\"sllx %H1,32,%H1\", operands);
+ if (sparc_check_64 (operands[2], insn) <= 0)
+ output_asm_insn (\"srl %L2,0,%L2\", operands);
+ if (which_alternative == 1)
+ return \"or %L1,%H1,%H1\;sllx %H2,32,%L1\;or %L2,%L1,%L1\;mulx %H1,%L1,%L0\;srlx %L0,32,%H0\";
+ else
+ return \"sllx %H1,32,%3\;sllx %H2,32,%4\;or %L1,%3,%3\;or %L2,%4,%4\;mulx %3,%4,%3\;srlx %3,32,%H0\;mov %3,%L0\";
+}"
+ [(set_attr "length" "9,8")])
+
+;; It is not known whether this will match.
+
+(define_insn "*cmp_mul_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
+ "smulcc %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_mulsidi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; V9 puts the 64 bit product in a 64 bit register. Only out or global
+;; registers can hold 64 bit values in the V8plus environment.
+(define_insn "mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%L0\;srlx %L0,32,%H0
+ smul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "const_mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "small_int" "I,I")))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%L0\;srlx %L0,32,%H0
+ smul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "*mulsidi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+;; Extra pattern, because sign_extend of a constant isn't valid.
+
+(define_insn "const_mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "small_int" "I")))]
+ "TARGET_HARD_MUL"
+ "*
+{
+ return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+(define_expand "smulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_smulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
+ emit_insn (gen_const_smulsi3_highpart (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_smulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
+}")
+
+(define_insn "smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (subreg:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ 1))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand 2 "small_int" "i,i"))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*smulsi3_highpart_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "smul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_smulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "smul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsidi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%L0\;srlx %L0,32,%H0
+ umul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "*umulsidi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+;; Extra pattern, because zero_extend of a constant isn't valid.
+
+(define_insn "const_umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" "")))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+(define_insn "const_umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" "")))
+ (clobber (match_scratch:SI 3 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%L0\;srlx %L0,32,%H0
+ umul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_expand "umulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_umulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
+ emit_insn (gen_const_umulsi3_highpart (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
+}")
+
+(define_insn "umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%0\;srlx %0,%3,%0
+ umul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" ""))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%0\;srlx %0,%3,%0
+ umul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*umulsi3_highpart_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "umul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_umulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" ""))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "umul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+;; The v8 architecture specifies that there must be 3 instructions between
+;; a y register write and a use of it for correct results.
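+;; For example, the divsi3 template below expands on plain v8 to
+;;   sra %1,31,%3; wr %g0,%3,%y; nop; nop; nop; sdiv %1,%2,%0
+;; where the three nops satisfy that restriction; v9 omits them.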
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "move_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (which_alternative == 0)
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdiv %1,%2,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdiv %1,%2,%0\";
+ else
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;ld %2,%3\;sdiv %1,%3,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;ld %2,%3\;nop\;nop\;sdiv %1,%3,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 4) (const_int 7)))])
+
+(define_insn "divdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (div:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "sdivx %1,%2,%0")
+
+;; It is not known whether this will match.
+
+(define_insn "*cmp_sdiv_cc_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC 100)
+ (compare:CC (div:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdivcc %1,%2,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdivcc %1,%2,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 3) (const_int 6)))])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r,r,m")
+ (match_operand:SI 2 "move_operand" "rI,m,r")))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ output_asm_insn (\"wr %%g0,%%g0,%%y\", operands);
+ switch (which_alternative)
+ {
+ default:
+ if (TARGET_V9)
+ return \"udiv %1,%2,%0\";
+ return \"nop\;nop\;nop\;udiv %1,%2,%0\";
+ case 1:
+ return \"ld %2,%0\;nop\;nop\;udiv %1,%0,%0\";
+ case 2:
+ return \"ld %1,%0\;nop\;nop\;udiv %0,%2,%0\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (and (eq_attr "isa" "v9")
+ (eq_attr "alternative" "0"))
+ (const_int 2) (const_int 5)))])
+
+(define_insn "udivdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (udiv:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "udivx %1,%2,%0")
+
+;; It is not known whether this will match.
+
+(define_insn "*cmp_udiv_cc_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC 100)
+ (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (TARGET_V9)
+ return \"wr %%g0,%%g0,%%y\;udivcc %1,%2,%0\";
+ else
+ return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udivcc %1,%2,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 2) (const_int 5)))])
+
+; sparclet multiply/accumulate insns
+
+(define_insn "*smacsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (match_operand:SI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "smac %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_insn "*smacdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "smacd %1,%2,%L0"
+ [(set_attr "type" "imul")])
+
+(define_insn "*umacdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "umacd %1,%2,%L0"
+ [(set_attr "type" "imul")])
+
+;;- Boolean instructions
+;; We define DImode `and' so that, together with DImode `not', we can get
+;; DImode `andn'. Other combinations are possible.
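+;; For example, combine can rewrite "x & ~y" into (and:DI (not:DI y) x),
+;; which matches the *and_not_di patterns further down and emits andn
+;; (one per word on sp32) instead of a separate complement followed by an and.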
+
+(define_expand "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*anddi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"fand %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"and %L1,%3,%L0\;and %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"and %1,%2,%0\;and %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*anddi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "and %1,%2,%0")
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ and %1,%2,%0
+ fands %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+;; Split DImode logical operations requiring two instructions.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operator:DI 1 "cc_arithop" ; AND, IOR, XOR
+ [(match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 3 "arith_double_operand" "")]))]
+ "! TARGET_ARCH64 && reload_completed
+ && GET_CODE (operands[0]) == REG && REGNO (operands[0]) < 32"
+ [(set (match_dup 4) (match_op_dup:SI 1 [(match_dup 6) (match_dup 8)]))
+ (set (match_dup 5) (match_op_dup:SI 1 [(match_dup 7) (match_dup 9)]))]
+ "
+{
+ operands[4] = gen_highpart (SImode, operands[0]);
+ operands[5] = gen_lowpart (SImode, operands[0]);
+ operands[6] = gen_highpart (SImode, operands[2]);
+ operands[7] = gen_lowpart (SImode, operands[2]);
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ if (INTVAL (operands[3]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[3]);
+ operands[9] = gen_lowpart (SImode, operands[3]);
+}")
+
+(define_insn "*and_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ andn %2,%1,%0\;andn %R2,%R1,%R0
+ fandnot1 %1,%2,%0"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*and_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "andn %2,%1,%0")
+
+(define_insn "*and_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
+ ""
+ "@
+ andn %2,%1,%0
+ fandnot1s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*iordi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"for %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"or %L1,%3,%L0\;or %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"or %1,%2,%0\;or %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*iordi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "or %1,%2,%0")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ or %1,%2,%0
+ fors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+(define_insn "*or_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ orn %2,%1,%0\;orn %R2,%R1,%R0
+ fornot1 %1,%2,%0"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*or_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "orn %2,%1,%0")
+
+(define_insn "*or_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
+ ""
+ "@
+ orn %2,%1,%0
+ fornot1s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*xordi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"fxor %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"xor %L1,%3,%L0\;xor %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"xor %1,%2,%0\;xor %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
+
+(define_insn "*xordi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "xor %r1,%2,%0")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (xor:SI (match_operand:SI 1 "arith_operand" "%rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ xor %r1,%2,%0
+ fxors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" ""))))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
+;; Combine now canonicalizes to the rightmost expression.
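+;; Derivation: since ~b == b ^ ~0, we have a ^ ~b == a ^ b ^ ~0 == ~(a ^ b),
+;; and likewise for ~a ^ b, so all three forms reduce to the single ~(a ^ b)
+;; shape matched below and emitted as xnor.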
+(define_insn "*xor_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r,b")
+ (match_operand:DI 2 "register_operand" "r,b"))))]
+ "! TARGET_ARCH64"
+ "@
+ xnor %1,%2,%0\;xnor %R1,%R2,%R0
+ fxnor %1,%2,%0"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
+
+(define_insn "*xor_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))))]
+ "TARGET_ARCH64"
+ "xnor %r1,%2,%0"
+ [(set_attr "type" "ialu")])
+
+(define_insn "*xor_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d"))))]
+ ""
+ "@
+ xnor %r1,%2,%0
+ fxnors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+;; These correspond to the above in the case where we also (or only)
+;; want to set the condition code.
+
+(define_insn "*cmp_cc_arith_op"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 2 "cc_arithop"
+ [(match_operand:SI 0 "arith_operand" "%r")
+ (match_operand:SI 1 "arith_operand" "rI")])
+ (const_int 0)))]
+ ""
+ "%A2cc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_arith_op"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 2 "cc_arithop"
+ [(match_operand:DI 0 "arith_double_operand" "%r")
+ (match_operand:DI 1 "arith_double_operand" "rHI")])
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "%A2cc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_arith_op_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 3 "cc_arithop"
+ [(match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (match_dup 3))]
+ ""
+ "%A3cc %1,%2,%0")
+
+(define_insn "*cmp_ccx_arith_op_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 3 "cc_arithop"
+ [(match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 3))]
+ "TARGET_ARCH64"
+ "%A3cc %1,%2,%0")
+
+(define_insn "*cmp_cc_xor_not"
+ [(set (reg:CC 100)
+ (compare:CC
+ (not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 1 "arith_operand" "rI")))
+ (const_int 0)))]
+ ""
+ "xnorcc %r0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_xor_not"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (not:DI (xor:DI (match_operand:DI 0 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 1 "arith_double_operand" "rHI")))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "xnorcc %r0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_xor_not_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (xor:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "xnorcc %r1,%2,%0")
+
+(define_insn "*cmp_ccx_xor_not_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_dup 1) (match_dup 2))))]
+ "TARGET_ARCH64"
+ "xnorcc %r1,%2,%0")
+
+(define_insn "*cmp_cc_arith_op_not"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 2 "cc_arithopn"
+ [(not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (match_operand:SI 1 "reg_or_0_operand" "rJ")])
+ (const_int 0)))]
+ ""
+ "%B2cc %r1,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_arith_op_not"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 2 "cc_arithopn"
+ [(not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")])
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "%B2cc %r1,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_arith_op_not_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 3 "cc_arithopn"
+ [(not:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (match_operand:SI 2 "reg_or_0_operand" "rJ")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (match_dup 3))]
+ ""
+ "%B3cc %r2,%1,%0")
+
+(define_insn "*cmp_ccx_arith_op_not_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 3 "cc_arithopn"
+ [(not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 3))]
+ "TARGET_ARCH64"
+ "%B3cc %r2,%1,%0")
+
+;; We cannot use the "neg" pseudo insn because the Sun assembler
+;; does not know how to make it work for constants.
+
+(define_expand "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operand0,
+ gen_rtx_NEG (DImode, operand1)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*negdi2_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"subcc %%g0,%L1,%L0\;subx %%g0,%H1,%H0\";
+}"
+ [(set_attr "type" "unary")
+ ;; ??? This is wrong for TARGET_LIVE_G0 but it's not critical.
+ (set_attr "length" "2")])
+
+(define_insn "*negdi2_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "sub %%g0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "1")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (TARGET_LIVE_G0)
+ return \"and %%g0,0,%%g0\;sub %%g0,%1,%0\";
+ return \"sub %%g0,%1,%0\";
+}"
+ [(set_attr "type" "unary")
+ (set (attr "length")
+ (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1)))])
+
+(define_insn "*cmp_cc_neg"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_neg"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (neg:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_neg"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (neg:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_dup 1)))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*cmp_ccx_set_neg"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (neg:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+;; We cannot use the "not" pseudo insn because the Sun assembler
+;; does not know how to make it work for constants.
+(define_expand "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (not:DI (match_operand:DI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*one_cmpldi2_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (match_operand:DI 1 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ xnor %1,0,%0\;xnor %R1,0,%R0
+ fnot1 %1,%0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "2,1")])
+
+(define_insn "*one_cmpldi2_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "xnor %1,0,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,d")
+ (not:SI (match_operand:SI 1 "arith_operand" "r,I,d")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"xnor %1,0,%0\";
+ if (which_alternative == 2)
+ return \"fnot1s %1,%0\";
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"xnor %%g0,%1,%0\";
+}"
+ [(set_attr "type" "unary,unary,fp")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1))
+ (const_int 1)])])
+
+(define_insn "*cmp_cc_not"
+ [(set (reg:CC 100)
+ (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "xnorcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_not"
+ [(set (reg:CCX 100)
+ (compare:CCX (not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "xnorcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_not"
+ [(set (reg:CC 100)
+ (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ "! TARGET_LIVE_G0"
+ "xnorcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*cmp_ccx_set_not"
+ [(set (reg:CCX 100)
+ (compare:CCX (not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "xnorcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+;; Floating point arithmetic instructions.
+
+(define_insn "addtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (plus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "faddq %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (plus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "faddd %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fadds %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (minus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsubq %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (minus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fsubd %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsubs %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "multf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fmulq %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fmuld %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fmuls %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*muldf3_extend"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
+ "(TARGET_V8 || TARGET_V9) && TARGET_FPU"
+ "fsmuld %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*multf3_extend"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "e"))
+ (float_extend:TF (match_operand:DF 2 "register_operand" "e"))))]
+ "(TARGET_V8 || TARGET_V9) && TARGET_FPU && TARGET_HARD_QUAD"
+ "fdmulq %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+;; We don't have timing data for the quad-precision divide.
+(define_insn "divtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (div:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fdivq %1,%2,%0"
+ [(set_attr "type" "fpdivd")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (div:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fdivd %1,%2,%0"
+ [(set_attr "type" "fpdivd")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fdivs %1,%2,%0"
+ [(set_attr "type" "fpdivs")])
+
+(define_insn "negtf2"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU"
+ "*
+{
+ /* v9: can't use fnegs, won't work with upper regs. */
+ if (which_alternative == 0)
+ return TARGET_V9 ? \"fnegd %0,%0\" : \"fnegs %0,%0\";
+ else
+ return TARGET_V9 ? \"fnegd %1,%0\;fmovd %S1,%S0\"
+ : \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (neg:DF (match_operand:DF 1 "register_operand" "0,e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fnegd %1,%0\";
+ else if (which_alternative == 0)
+ return \"fnegs %0,%0\";
+ else
+ return \"fnegs %1,%0\;fmovs %R1,%R0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fnegs %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "abstf2"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU"
+ "*
+{
+ /* v9: can't use fabss, won't work with upper regs. */
+ if (which_alternative == 0)
+ return TARGET_V9 ? \"fabsd %0,%0\" : \"fabss %0,%0\";
+ else
+ return TARGET_V9 ? \"fabsd %1,%0\;fmovd %S1,%S0\"
+ : \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (abs:DF (match_operand:DF 1 "register_operand" "0,e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fabsd %1,%0\";
+ else if (which_alternative == 0)
+ return \"fabss %0,%0\";
+ else
+ return \"fabss %1,%0\;fmovs %R1,%R0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fabss %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "sqrttf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsqrtq %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fsqrtd %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsqrts %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+;;- arithmetic shift instructions
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"sll %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_ashldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"sllx %1,%2,%0\";
+}")
+
+(define_insn "ashldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashift:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"sllx\");"
+ [(set_attr "length" "5,5,6")])
+
+;; Optimize (1LL<<x)-1
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=h")
+ (plus:DI (ashift:DI (const_int 1)
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int -1)))]
+ "TARGET_V8PLUS"
+ "*
+{
+ if (GET_CODE (operands[2]) == REG && REGNO (operands[2]) == REGNO (operands[0]))
+ return \"mov 1,%L0\;sllx %L0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+ return \"mov 1,%H0\;sllx %H0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+}"
+ [(set_attr "length" "4")])
+
+(define_insn "*cmp_cc_ashift_1"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "addcc %0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_ashift_1"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_dup 1) (const_int 1)))]
+ ""
+ "addcc %1,%1,%0")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"sra %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL; /* prefer generic code in this case */
+ emit_insn (gen_ashrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"srax %1,%2,%0\";
+}")
+
+(define_insn "ashrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srax\");"
+ [(set_attr "length" "5,5,6")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"srl %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_lshrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"srlx %1,%2,%0\";
+}")
+
+(define_insn "lshrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (lshiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srlx\");"
+ [(set_attr "length" "5,5,6")])
+
+;; Unconditional and other jump instructions
+;; On the Sparc, by setting the annul bit on an unconditional branch, the
+;; following insn is never executed. This saves us a nop. Dbx does not
+;; handle such branches though, so we only use them when optimizing.
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ /* Some implementations (e.g. TurboSparc) are reported to have problems
+ with
+ foo: b,a foo
+ i.e. an empty loop with the annul bit set. The workaround is to use
+ foo: b foo; nop
+ instead. */
+
+ if (flag_delayed_branch
+ && (insn_addresses[INSN_UID (operands[0])]
+ == insn_addresses[INSN_UID (insn)]))
+ return \"b %l0%#\";
+ else
+ return \"b%* %l0%(\";
+}"
+ [(set_attr "type" "uncond_branch")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+ "
+{
+ if (GET_MODE (operands[0]) != Pmode)
+ abort ();
+
+ /* In pic mode, our address differences are against the base of the
+ table. Add that base value back in; CSE ought to be able to combine
+ the two address loads. */
+ if (flag_pic)
+ {
+ rtx tmp, tmp2;
+ tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ tmp2 = operands[0];
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
+ operands[0] = memory_address (Pmode, tmp);
+ }
+}")
+
+(define_insn "*tablejump_sp32"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "! TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+(define_insn "*tablejump_sp64"
+ [(set (pc) (match_operand:DI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+;; This pattern recognizes the "instruction" that appears in
+;; a function call that wants a structure value,
+;; to inform the called function if compiled with Sun CC.
+;(define_insn "*unimp_insn"
+; [(match_operand:SI 0 "immediate_operand" "")]
+; "GET_CODE (operands[0]) == CONST_INT && INTVAL (operands[0]) > 0"
+; "unimp %0"
+; [(set_attr "type" "marker")])
+
+;;- jump to subroutine
+(define_expand "call"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(call (match_operand 0 "call_operand" "")
+ (match_operand 3 "" "i"))]
+ ;; operands[2] is next_arg_register
+ ;; operands[3] is struct_value_size_rtx.
+ ""
+ "
+{
+ rtx fn_rtx, nregs_rtx;
+
+ if (GET_MODE (operands[0]) != FUNCTION_MODE)
+ abort ();
+
+ if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
+ {
+ /* This is really a PIC sequence. We want to represent
+ it as a funny jump so its delay slots can be filled.
+
+ ??? But if this really *is* a CALL, won't it clobber the
+ call-clobbered registers? We lose this if it is a JUMP_INSN.
+ Why can't we have delay slots filled if it were a CALL? */
+
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
+ emit_jump_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (3,
+ gen_rtx_SET (VOIDmode, pc_rtx,
+ XEXP (operands[0], 0)),
+ GEN_INT (INTVAL (operands[3]) & 0xfff),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ else
+ emit_jump_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, pc_rtx,
+ XEXP (operands[0], 0)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ goto finish_call;
+ }
+
+ fn_rtx = operands[0];
+
+ /* Count the number of parameter registers being used by this call.
+ If that argument is NULL, it means we are using them all, which
+ means 6 on the sparc. */
+#if 0
+ if (operands[2])
+ nregs_rtx = GEN_INT (REGNO (operands[2]) - 8);
+ else
+ nregs_rtx = GEN_INT (6);
+#else
+ nregs_rtx = const0_rtx;
+#endif
+
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
+ emit_call_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ GEN_INT (INTVAL (operands[3]) & 0xfff),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ else
+ emit_call_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+
+ finish_call:
+#if 0
+ /* If this call wants a structure value,
+ emit an unimp insn to let the called function know about this. */
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) > 0)
+ {
+ rtx insn = emit_insn (operands[3]);
+ SCHED_GROUP_P (insn) = 1;
+ }
+#endif
+
+ DONE;
+}")
+
+;; We can't use the same pattern for these two insns, because then registers
+;; in the address may not be properly reloaded.
+
+(define_insn "*call_address_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_symbolic_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_address_sp64"
+ [(call (mem:SI (match_operand:DI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_symbolic_sp64"
+ [(call (mem:SI (match_operand:DI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_address_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_symbolic_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that may want a structure value. This is used for
+;; untyped_calls.
+(define_insn "*call_address_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call %a0,%1\;nop\;nop"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that wants a structure value.
+(define_insn "*call_symbolic_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call %a0,%1\;nop\;nop"
+ [(set_attr "type" "call_no_delay_slot")])
+
+(define_expand "call_value"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(set (match_operand 0 "register_operand" "=rf")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 4 "" "")))]
+ ;; operand 2 is stack_size_rtx
+ ;; operand 3 is next_arg_register
+ ""
+ "
+{
+ rtx fn_rtx, nregs_rtx;
+ rtvec vec;
+
+ if (GET_MODE (operands[1]) != FUNCTION_MODE)
+ abort ();
+
+ fn_rtx = operands[1];
+
+#if 0
+ if (operands[3])
+ nregs_rtx = GEN_INT (REGNO (operands[3]) - 8);
+ else
+ nregs_rtx = GEN_INT (6);
+#else
+ nregs_rtx = const0_rtx;
+#endif
+
+ vec = gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)));
+
+ emit_call_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+ DONE;
+}")
+
+(define_insn "*call_value_address_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "address_operand" "p"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "! TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_symbolic_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "! TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_address_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "address_operand" "p"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_symbolic_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+ /* Pass constm1 to indicate that it may expect a structure value, but
+ we don't know what size it is. */
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "")
+
+;; Prepare to return any type including a structure value.
+
+(define_expand "untyped_return"
+ [(match_operand:BLK 0 "memory_operand" "")
+ (match_operand 1 "" "")]
+ ""
+ "
+{
+ rtx valreg1 = gen_rtx_REG (DImode, 24);
+ rtx valreg2 = gen_rtx_REG (TARGET_ARCH64 ? TFmode : DFmode, 32);
+ rtx result = operands[0];
+
+ if (! TARGET_ARCH64)
+ {
+ rtx rtnreg = gen_rtx_REG (SImode, (leaf_function ? 15 : 31));
+ rtx value = gen_reg_rtx (SImode);
+
+ /* Fetch the instruction where we will return to and see if it's an unimp
+ instruction (the most significant 10 bits will be zero). If so,
+ update the return address to skip the unimp instruction. */
+ emit_move_insn (value,
+ gen_rtx_MEM (SImode, plus_constant (rtnreg, 8)));
+ emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
+ emit_insn (gen_update_return (rtnreg, value));
+ }
+
+ /* Reload the function value registers. */
+ emit_move_insn (valreg1, change_address (result, DImode, XEXP (result, 0)));
+ emit_move_insn (valreg2,
+ change_address (result, TARGET_ARCH64 ? TFmode : DFmode,
+ plus_constant (XEXP (result, 0), 8)));
+
+ /* Put USE insns before the return. */
+ emit_insn (gen_rtx_USE (VOIDmode, valreg1));
+ emit_insn (gen_rtx_USE (VOIDmode, valreg2));
+
+ /* Construct the return. */
+ expand_null_return ();
+
+ DONE;
+}")
+
+;; This is a bit of a hack. We're incrementing a fixed register (%i7),
+;; and parts of the compiler don't want to believe that the add is needed.
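+;; The branch below is annulled, so the "add %0,4,%0" in its delay slot
+;; executes only when operand 1 is zero, i.e. only when an unimp instruction
+;; follows the call and the return address must be bumped past it.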
+
+(define_insn "update_return"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")] 1)]
+ "! TARGET_ARCH64"
+ "cmp %1,0\;be,a .+8\;add %0,4,%0"
+ [(set_attr "type" "multi")])
+
+(define_insn "return"
+ [(return)
+ (use (reg:SI 31))]
+ "! TARGET_EPILOGUE"
+ "* return output_return (operands);"
+ [(set_attr "type" "return")])
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (parallel [(return)
+ (use (reg:SI 31))])]
+ "sparc_return_peephole_ok (operands[0], operands[1])"
+ "return %%i7+8\;mov %Y1,%Y0")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "address_operand" "p"))]
+ ""
+ "")
+
+(define_insn "*branch_sp32"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ "! TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+(define_insn "*branch_sp64"
+ [(set (pc) (match_operand:DI 0 "address_operand" "p"))]
+ "TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+;; ??? Doesn't work with -mflat.
+(define_expand "nonlocal_goto"
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "" "")]
+ ""
+ "
+{
+ rtx chain = operands[0];
+ rtx fp = operands[1];
+ rtx stack = operands[2];
+ rtx lab = operands[3];
+ rtx labreg;
+
+ /* Trap instruction to flush all the register windows. */
+ emit_insn (gen_flush_register_windows ());
+
+ /* Load the fp value for the containing fn into %fp. This is needed
+ because STACK refers to %fp. Note that virtual register instantiation
+ fails if the virtual %fp isn't set from a register. */
+ if (GET_CODE (fp) != REG)
+ fp = force_reg (Pmode, fp);
+ emit_move_insn (virtual_stack_vars_rtx, fp);
+
+ /* Find the containing function's current nonlocal goto handler,
+ which will do any cleanups and then jump to the label. */
+ labreg = gen_rtx_REG (Pmode, 8);
+ emit_move_insn (labreg, lab);
+
+ /* Restore %fp from stack pointer value for containing function.
+ The restore insn that follows will move this to %sp,
+ and reload the appropriate value into %fp. */
+ emit_move_insn (frame_pointer_rtx, stack);
+
+ /* USE of frame_pointer_rtx added for consistency; not clear if
+ really needed. */
+ /*emit_insn (gen_rtx_USE (VOIDmode, frame_pointer_rtx));*/
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ /* Return, restoring reg window and jumping to goto handler. */
+ if (TARGET_V9 && GET_CODE (chain) == CONST_INT
+ && ! (INTVAL (chain) & ~(HOST_WIDE_INT)0xffffffff))
+ {
+ emit_insn (gen_goto_handler_and_restore_v9 (labreg, static_chain_rtx,
+ chain));
+ emit_barrier ();
+ DONE;
+ }
+ /* Put in the static chain register the nonlocal label address. */
+ emit_move_insn (static_chain_rtx, chain);
+ emit_insn (gen_rtx_USE (VOIDmode, static_chain_rtx));
+ emit_insn (gen_goto_handler_and_restore (labreg));
+ emit_barrier ();
+ DONE;
+}")
+
+;; Special trap insn to flush register windows.
+(define_insn "flush_register_windows"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "* return TARGET_V9 ? \"flushw\" : \"ta 3\";"
+ [(set_attr "type" "misc")])
+
+(define_insn "goto_handler_and_restore"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r")] 2)]
+ ""
+ "jmp %0+0\;restore"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
+
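+;; The first alternative handles chain values that fit in a 13-bit signed
+;; immediate (constraint "I"); the second builds larger constants with a
+;; sethi/or pair around the return.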
+(define_insn "goto_handler_and_restore_v9"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r,r")
+ (match_operand:SI 1 "register_operand" "=r,r")
+ (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+ "TARGET_V9 && ! TARGET_ARCH64"
+ "@
+ return %0+0\;mov %2,%Y1
+ sethi %%hi(%2),%1\;return %0+0\;or %Y1,%%lo(%2),%Y1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2,3")])
+
+(define_insn "*goto_handler_and_restore_v9_sp64"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "=r,r")
+ (match_operand:DI 1 "register_operand" "=r,r")
+ (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+ "TARGET_V9 && TARGET_ARCH64"
+ "@
+ return %0+0\;mov %2,%Y1
+ sethi %%hi(%2),%1\;return %0+0\;or %Y1,%%lo(%2),%Y1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2,3")])
+
+;; Pattern for use after a setjmp to store FP and the return register
+;; into the stack area.
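+;; The stores go into the %i6 and %i7 slots of the 16-register window save
+;; area at %sp (offsets 56/60 for 32-bit frames, 112/120 for 64-bit frames).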
+
+(define_expand "setjmp"
+ [(const_int 0)]
+ ""
+ "
+{
+ if (TARGET_ARCH64)
+ emit_insn (gen_setjmp_64 ());
+ else
+ emit_insn (gen_setjmp_32 ());
+ DONE;
+}")
+
+(define_expand "setjmp_32"
+ [(set (mem:SI (plus:SI (reg:SI 14) (const_int 56))) (match_dup 0))
+ (set (mem:SI (plus:SI (reg:SI 14) (const_int 60))) (reg:SI 31))]
+ ""
+ "
+{ operands[0] = frame_pointer_rtx; }")
+
+(define_expand "setjmp_64"
+ [(set (mem:DI (plus:DI (reg:DI 14) (const_int 112))) (match_dup 0))
+ (set (mem:DI (plus:DI (reg:DI 14) (const_int 120))) (reg:DI 31))]
+ ""
+ "
+{ operands[0] = frame_pointer_rtx; }")
+
+;; Special pattern for the FLUSH instruction.
+
+(define_insn "flush"
+ [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 3)]
+ ""
+ "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
+ [(set_attr "type" "misc")])
+
+;; find first set.
+
+;; The scan instruction searches from the most significant bit while ffs
+;; searches from the least significant bit. The bit index and treatment of
+;; zero also differ. It takes at least 7 instructions to get the proper
+;; result. Here is an obvious 8 instruction sequence.
+
+(define_insn "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (ffs:SI (match_operand:SI 1 "register_operand" "r")))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_SPARCLITE || TARGET_SPARCLET"
+ "*
+{
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
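+
+;; For reference, a C-level sketch of the value computed above (a sketch only,
+;; assuming the usual ffs convention: the 1-based index of the least
+;; significant set bit, and 0 for a zero input):
+;;
+;;   int ffs_ref (unsigned int x)
+;;   {
+;;     int n;
+;;     if (x == 0)
+;;       return 0;
+;;     for (n = 1; (x & 1) == 0; n++)
+;;       x >>= 1;
+;;     return n;
+;;   }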
+
+;; ??? This should be a define_expand, so that the extra instructions have
+;; a chance of being optimized away.
+
+;; Disabled because none of the UltraSparcs implement popc. The HAL R1
+;; does, but no one uses that and we don't have a switch for it.
+;
+;(define_insn "ffsdi2"
+; [(set (match_operand:DI 0 "register_operand" "=&r")
+; (ffs:DI (match_operand:DI 1 "register_operand" "r")))
+; (clobber (match_scratch:DI 2 "=&r"))]
+; "TARGET_ARCH64"
+; "neg %1,%2\;xnor %1,%2,%2\;popc %2,%0\;movzr %1,0,%0"
+; [(set_attr "type" "multi")
+; (set_attr "length" "4")])
+
+;; Split up troublesome insns for better scheduling.
+
+;; The following patterns are straightforward. They can be applied
+;; either before or after register allocation.
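+;; For example, a store of a register to a symbolic address is rewritten as
+;; a sethi of %hi(sym) into the scratch register followed by a store to
+;; [scratch+%lo(sym)], so the two halves can be scheduled independently.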
+
+(define_split
+ [(set (match_operand 0 "splittable_symbolic_memory_operand" "")
+ (match_operand 1 "reg_or_0_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "! flag_pic"
+ [(set (match_dup 2) (high:SI (match_dup 3)))
+ (set (match_dup 4) (match_dup 1))]
+ "
+{
+ operands[3] = XEXP (operands[0], 0);
+ operands[4] = gen_rtx_MEM (GET_MODE (operands[0]),
+ gen_rtx_LO_SUM (SImode, operands[2], operands[3]));
+ MEM_IN_STRUCT_P (operands[4]) = MEM_IN_STRUCT_P (operands[0]);
+ MEM_VOLATILE_P (operands[4]) = MEM_VOLATILE_P (operands[0]);
+ RTX_UNCHANGING_P (operands[4]) = RTX_UNCHANGING_P (operands[0]);
+}")
+
+(define_split
+ [(set (match_operand 0 "splittable_immediate_memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "flag_pic"
+ [(set (match_dup 3) (match_dup 1))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[0], 0),
+ GET_MODE (operands[0]),
+ operands[2]);
+ operands[3] = gen_rtx_MEM (GET_MODE (operands[0]), addr);
+ MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[0]);
+ MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[0]);
+ RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[0]);
+}")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operand 1 "splittable_immediate_memory_operand" ""))]
+ "flag_pic"
+ [(set (match_dup 0) (match_dup 2))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[1], 0),
+ GET_MODE (operands[1]),
+ operands[0]);
+ operands[2] = gen_rtx_MEM (GET_MODE (operands[1]), addr);
+ MEM_IN_STRUCT_P (operands[2]) = MEM_IN_STRUCT_P (operands[1]);
+ MEM_VOLATILE_P (operands[2]) = MEM_VOLATILE_P (operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+}")
+
+;; Sign- and Zero-extend operations can have symbolic memory operands.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 1 "extend_op" [(match_operand 2 "splittable_immediate_memory_operand" "")]))]
+ "flag_pic"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 3)]))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[2], 0),
+ GET_MODE (operands[2]),
+ operands[0]);
+ operands[3] = gen_rtx_MEM (GET_MODE (operands[2]), addr);
+ MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[2]);
+ MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[2]);
+ RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[2]);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))]
+ "! flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST
+ || GET_CODE (operands[1]) == LABEL_REF)"
+ [(set (match_dup 0) (high:SI (match_dup 1)))
+ (set (match_dup 0)
+ (lo_sum:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+;; LABEL_REFs are not modified by `legitimize_pic_address', so they are
+;; excluded here to avoid infinite recursion in the PIC case.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))]
+ "flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)"
+ [(set (match_dup 0) (match_dup 1))]
+ "
+{
+ operands[1] = legitimize_pic_address (operands[1], Pmode, operands[0]);
+}")
+
+;; These split sne/seq insns. The forms of the resulting insns are
+;; somewhat bogus, but they avoid extra patterns and show data dependency.
+;; Nothing will look at these in detail after splitting has occurred.
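+;; For example, "reg0 = (reg1 != 0)" becomes a compare of (neg reg1) against
+;; zero, which sets the carry exactly when reg1 is nonzero, followed by a set
+;; of reg0 from the ltu (carry set) condition.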
+
+;; ??? v9 DImode versions are missing because addc and subc use %icc.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+;; Peepholes go at the end.
+
+;; Optimize consecutive loads or stores into ldd and std when possible.
+;; The conditions in which we do this are very restricted and are
+;; explained in the code for the registers_ok_for_ldd_peep and
+;; addrs_ok_for_ldd_peep functions.
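+;; For example, two word loads from [%reg] and [%reg+4] into a suitable
+;; even/odd register pair become a single ldd when those checks pass.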
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[1], 0))"
+ "stx %%g0,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[0], 0))"
+ "stx %%g0,%1")
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=rf")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:SI 2 "register_operand" "=rf")
+ (match_operand:SI 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
+ "ldd %1,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "register_operand" "rf"))
+ (set (match_operand:SI 2 "memory_operand" "")
+ (match_operand:SI 3 "register_operand" "rf"))]
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+ "std %1,%0")
+
+(define_peephole
+ [(set (match_operand:SF 0 "register_operand" "=fr")
+ (match_operand:SF 1 "memory_operand" ""))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (match_operand:SF 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
+ "ldd %1,%0")
+
+(define_peephole
+ [(set (match_operand:SF 0 "memory_operand" "")
+ (match_operand:SF 1 "register_operand" "fr"))
+ (set (match_operand:SF 2 "memory_operand" "")
+ (match_operand:SF 3 "register_operand" "fr"))]
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+ "std %1,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=rf")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:SI 2 "register_operand" "=rf")
+ (match_operand:SI 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd %3,%2")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "register_operand" "rf"))
+ (set (match_operand:SI 2 "memory_operand" "")
+ (match_operand:SI 3 "register_operand" "rf"))]
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std %3,%2")
+
+(define_peephole
+ [(set (match_operand:SF 0 "register_operand" "=fr")
+ (match_operand:SF 1 "memory_operand" ""))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (match_operand:SF 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd %3,%2")
+
+(define_peephole
+ [(set (match_operand:SF 0 "memory_operand" "")
+ (match_operand:SF 1 "register_operand" "fr"))
+ (set (match_operand:SF 2 "memory_operand" "")
+ (match_operand:SF 3 "register_operand" "fr"))]
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std %3,%2")
+
+;; Optimize the case of following a reg-reg move with a test
+;; of the reg just moved. Don't allow floating point regs for operand 0 or 1.
+;; This can result from a float to fix conversion.
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (reg:CC 100)
+ (compare:CC (match_operand:SI 2 "register_operand" "r")
+ (const_int 0)))]
+ "(rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[2], operands[1]))
+ && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
+ "orcc %1,0,%0")
+
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 2 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64
+ && (rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[2], operands[1]))
+ && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
+ "orcc %1,0,%0")
+
+;; Floating-point move peepholes
+;; ??? v9: Do we want similar ones?
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_dup 0)
+ (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:DF 2 "register_operand" "=er")
+ (mem:DF (match_dup 0)))]
+ "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
+ "*
+{
+ /* Go by way of output_move_double in case the register in operand 2
+ is not properly aligned for ldd. */
+ operands[1] = gen_rtx_MEM (DFmode,
+ gen_rtx_LO_SUM (SImode, operands[0], operands[1]));
+ operands[0] = operands[2];
+ return output_move_double (operands);
+}")
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_dup 0)
+ (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (mem:SF (match_dup 0)))]
+ "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
+ "ld [%0+%%lo(%a1)],%2")
+
+;; Return peepholes. First the "normal" ones.
+;; These are necessary to catch insns ending up in the epilogue delay list.
+
+(define_insn "*return_qi"
+ [(set (match_operand:QI 0 "restore_operand" "")
+ (match_operand:QI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_hi"
+ [(set (match_operand:HI 0 "restore_operand" "")
+ (match_operand:HI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_si"
+ [(set (match_operand:SI 0 "restore_operand" "")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+;; The following pattern is only generated by delayed-branch scheduling,
+;; when the insn winds up in the epilogue. This can only happen when
+;; ! TARGET_FPU because otherwise fp return values are in %f0.
+(define_insn "*return_sf_no_fpu"
+ [(set (match_operand:SF 0 "restore_operand" "r")
+ (match_operand:SF 1 "register_operand" "r"))
+ (return)]
+ "! TARGET_FPU && ! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1]))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_addsi"
+ [(set (match_operand:SI 0 "restore_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %r1,%2,%Y0\";
+ /* If the operands are global or in registers, we can use v9's return. */
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1])
+ && (GET_CODE (operands[2]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[2])))
+ return \"return %%i7+8\;add %Y1,%Y2,%Y0\";
+ else
+ return \"ret\;restore %r1,%2,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_di"
+ [(set (match_operand:DI 0 "restore_operand" "")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (return)]
+ "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "ret\;restore %%g0,%1,%Y0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_adddi"
+ [(set (match_operand:DI 0 "restore_operand" "")
+ (plus:DI (match_operand:DI 1 "arith_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (return)]
+ "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "ret\;restore %r1,%2,%Y0"
+ [(set_attr "type" "multi")])
+
+;; The following pattern is only generated by delayed-branch scheduling,
+;; when the insn winds up in the epilogue.
+(define_insn "*return_sf"
+ [(set (reg:SF 32)
+ (match_operand:SF 0 "register_operand" "f"))
+ (return)]
+ "! TARGET_EPILOGUE"
+ "ret\;fmovs %0,%%f0"
+ [(set_attr "type" "multi")])
+
+;; Now peepholes to do a call followed by a jump.
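+;; The add in each call's delay slot adjusts %o7 by (label - call - 8), so
+;; the callee returns (to %o7+8) straight at the label and the separate jump
+;; is no longer needed.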
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand_address" "ps"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))])
+ (set (pc) (label_ref (match_operand 3 "" "")))]
+ "short_branch (INSN_UID (insn), INSN_UID (operands[3])) && in_same_eh_region (insn, operands[3]) && in_same_eh_region (insn, ins1)"
+ "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "call_operand_address" "ps"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))])
+ (set (pc) (label_ref (match_operand 2 "" "")))]
+ "short_branch (INSN_UID (insn), INSN_UID (operands[2])) && in_same_eh_region (insn, operands[2]) && in_same_eh_region (insn, ins1)"
+ "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand_address" "ps"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))])
+ (set (pc) (label_ref (match_operand 3 "" "")))]
+ "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[3])) && in_same_eh_region (insn, operands[3]) && in_same_eh_region (insn, ins1)"
+ "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:DI 0 "call_operand_address" "ps"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))])
+ (set (pc) (label_ref (match_operand 2 "" "")))]
+ "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[2])) && in_same_eh_region (insn, operands[2]) && in_same_eh_region (insn, ins1)"
+ "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
+
+;; After a nonlocal goto, we need to restore the PIC register, but only
+;; if we need it. So do nothing much here, but we'll check for this in
+;; finalize_pic.
+
+(define_insn "nonlocal_goto_receiver"
+ [(unspec_volatile [(const_int 0)] 4)]
+ "flag_pic"
+ "")
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 5))]
+ ""
+ "ta 5"
+ [(set_attr "type" "misc")])
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "noov_compare_op"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand:SI 1 "arith_operand" ""))]
+ ""
+ "operands[2] = gen_compare_reg (GET_CODE (operands[0]),
+ sparc_compare_op0, sparc_compare_op1);
+ operands[3] = const0_rtx;")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CC 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
+ ""
+ "t%C0 %1"
+ [(set_attr "type" "misc")])
+
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CCX 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
+ "TARGET_V9"
+ "t%C0 %%xcc,%1"
+ [(set_attr "type" "misc")])
+
diff --git a/contrib/gcc/config/sparc/splet.h b/contrib/gcc/config/sparc/splet.h
new file mode 100644
index 0000000..23c6414
--- /dev/null
+++ b/contrib/gcc/config/sparc/splet.h
@@ -0,0 +1,53 @@
+/* Definitions of target machine for GNU compiler, for SPARClet.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* -mbroken-saverestore is not included here because the long term
+ default is -mno-broken-saverestore. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+
+/* -mlive-g0 is only supported on the sparclet. */
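+/* Each entry below is a {name, mask} pair; a negated mask clears those
+ bits, so e.g. -mbig-endian turns MASK_LITTLE_ENDIAN off. */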
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{"big-endian", -MASK_LITTLE_ENDIAN}, \
+{"little-endian", MASK_LITTLE_ENDIAN}, \
+{"live-g0", MASK_LIVE_G0}, \
+{"no-live-g0", -MASK_LIVE_G0}, \
+{"broken-saverestore", MASK_BROKEN_SAVERESTORE}, \
+{"no-broken-saverestore", -MASK_BROKEN_SAVERESTORE},
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{mlittle-endian:-EL} %(asm_cpu)"
+
+/* Require the user to supply crt0.o. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mlittle-endian:-EL}"
+
+/* sparclet chips are bi-endian. */
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+#undef WORDS_BIG_ENDIAN
+#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
diff --git a/contrib/gcc/config/sparc/sun4gas.h b/contrib/gcc/config/sparc/sun4gas.h
new file mode 100644
index 0000000..3cea956
--- /dev/null
+++ b/contrib/gcc/config/sparc/sun4gas.h
@@ -0,0 +1,27 @@
+/* Definitions of target machine for GNU compiler, for SunOS 4.x with gas
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* gas supports unaligned data. */
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+#define UNALIGNED_INT_ASM_OP ".uaword"
+#define UNALIGNED_SHORT_ASM_OP ".uahalf"
+
+/* defaults.h will define DWARF2_UNWIND_INFO for us. */
+#undef DWARF2_UNWIND_INFO
diff --git a/contrib/gcc/config/sparc/sun4o3.h b/contrib/gcc/config/sparc/sun4o3.h
new file mode 100644
index 0000000..10c7391
--- /dev/null
+++ b/contrib/gcc/config/sparc/sun4o3.h
@@ -0,0 +1,29 @@
+#include "sparc/sparc.h"
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(LP%d),%%o0\n\tcall .mcount\n\tor %%lo(LP%d),%%o0,%%o0\n", \
+ (LABELNO), (LABELNO))
+
+/* LINK_SPEC is needed only for SunOS 4. */
+
+#undef LINK_SPEC
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/sunos4.h b/contrib/gcc/config/sparc/sunos4.h
new file mode 100644
index 0000000..14c7a43
--- /dev/null
+++ b/contrib/gcc/config/sparc/sunos4.h
@@ -0,0 +1,49 @@
+/* Definitions of target machine for GNU compiler, for SunOS 4.x
+ Copyright (C) 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define SUNOS4_SHARED_LIBRARIES 1
+
+/* Use N_BINCL stabs. */
+
+#define DBX_USE_BINCL
+
+#include "sparc/sparc.h"
+
+/* The Sun as doesn't like unaligned data. */
+#define DWARF2_UNWIND_INFO 0
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/sysv4.h b/contrib/gcc/config/sparc/sysv4.h
new file mode 100644
index 0000000..7e90bdd
--- /dev/null
+++ b/contrib/gcc/config/sparc/sysv4.h
@@ -0,0 +1,231 @@
+/* Target definitions for GNU compiler for Sparc running System V.4
+ Copyright (C) 1991, 92, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+/* Undefine some symbols which are defined in "sparc.h" but which are
+ appropriate only for SunOS 4.x, and not for svr4. */
+
+#undef WORD_SWITCH_TAKES_ARG
+#undef ASM_OUTPUT_SOURCE_LINE
+#undef SELECT_SECTION
+#undef ASM_DECLARE_FUNCTION_NAME
+#undef TEXT_SECTION_ASM_OP
+#undef DATA_SECTION_ASM_OP
+
+#include "svr4.h"
+
+/* ??? Put back the SIZE_TYPE/PTRDIFF_TYPE definitions set by sparc.h.
+ Why, exactly, is svr4.h messing with this? Seems like the chip
+ would know best. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE (TARGET_ARCH64 ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_ARCH64 ? "long int" : "int")
+
+/* Undefine some symbols which are defined in "svr4.h" but which are
+ appropriate only for typical svr4 systems, but not for the specific
+ case of svr4 running on a Sparc. */
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#undef CONST_SECTION_ASM_OP
+#undef TYPE_OPERAND_FMT
+#undef PUSHSECTION_FORMAT
+#undef STRING_ASM_OP
+#undef COMMON_ASM_OP
+#undef SKIP_ASM_OP
+#undef SET_ASM_OP /* Has no equivalent. See ASM_OUTPUT_DEF below. */
+
+/* Provide a set of pre-definitions and pre-assertions appropriate for
+ the Sparc running svr4. __svr4__ is our extension. */
+
+#define CPP_PREDEFINES \
+"-Dsparc -Dunix -D__svr4__ -Asystem(unix) -Asystem(svr4)"
+
+/* The native assembler can't compute differences between symbols in different
+ sections when generating pic code, so we must put jump tables in the
+ text section. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Pass -K to the assembler when PIC. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+ %{fpic:-K PIC} %{fPIC:-K PIC} %(asm_cpu)"
+
+/* Must use data section for relocatable constants when pic. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) \
+{ \
+ if (flag_pic && symbolic_operand (RTX)) \
+ data_section (); \
+ else \
+ const_section (); \
+}
+
+/* Define the names of various pseudo-ops used by the Sparc/svr4 assembler.
+ Note that many of these are different from the typical pseudo-ops used
+ by most svr4 assemblers. That is probably due to a (misguided?) attempt
+ to keep the Sparc/svr4 assembler somewhat compatible with the Sparc/SunOS
+ assembler. */
+
+#define STRING_ASM_OP ".asciz"
+#define COMMON_ASM_OP ".common"
+#define SKIP_ASM_OP ".skip"
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+#define UNALIGNED_INT_ASM_OP ".uaword"
+#define UNALIGNED_SHORT_ASM_OP ".uahalf"
+#define PUSHSECTION_ASM_OP ".pushsection"
+#define POPSECTION_ASM_OP ".popsection"
+
+/* This is defined in sparc.h but is not used by svr4.h. */
+#undef ASM_LONG
+#define ASM_LONG ".long"
+
+/* This is the format used to print the second operand of a .type pseudo-op
+ for the Sparc/svr4 assembler. */
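+/* This yields, e.g., ".type foo,#function", where a typical svr4 assembler
+ would instead expect "@function". */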
+
+#define TYPE_OPERAND_FMT "#%s"
+
+/* This is the format used to print a .pushsection pseudo-op (and its operand)
+ for the Sparc/svr4 assembler. */
+
+#define PUSHSECTION_FORMAT "\t%s\t\"%s\"\n"
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+do { ASM_OUTPUT_ALIGN ((FILE), Pmode == SImode ? 2 : 3); \
+ ASM_OUTPUT_INTERNAL_LABEL ((FILE), PREFIX, NUM); \
+ } while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* Define how the Sparc registers should be numbered for Dwarf output.
+ The numbering provided here should be compatible with the native
+ svr4 SDB debugger in the Sparc/svr4 reference port. The numbering
+ is as follows:
+
+ Assembly name gcc internal regno Dwarf regno
+ ----------------------------------------------------------
+ g0-g7 0-7 0-7
+ o0-o7 8-15 8-15
+ l0-l7 16-23 16-23
+ i0-i7 24-31 24-31
+ f0-f31 32-63 40-71
+*/
+
+#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 32 ? (REGNO) : (REGNO) + 8)
+
+/* A set of symbol definitions for assembly pseudo-ops which will
+ get us switched to various sections of interest. These are used
+ in all places where we simply want to switch to a section, and
+ *not* to push the previous section name onto the assembler's
+ section names stack (as we do often in dwarfout.c). */
+
+#define TEXT_SECTION_ASM_OP ".section\t\".text\""
+#define DATA_SECTION_ASM_OP ".section\t\".data\""
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+#define CONST_SECTION_ASM_OP ".section\t\".rodata\""
+#define INIT_SECTION_ASM_OP ".section\t\".init\""
+#define FINI_SECTION_ASM_OP ".section\t\".fini\""
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t\".ctors\",#alloc,#write"
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t\".dtors\",#alloc,#write"
+#undef EH_FRAME_SECTION_ASM_OP
+#define EH_FRAME_SECTION_ASM_OP ".section\t\".eh_frame\",#alloc,#write"
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+
+#undef ASM_OUTPUT_SECTION_NAME /* Override svr4.h's definition. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (FILE, ".section\t\"%s%s\",#alloc,#execinstr\n", \
+ flag_function_sections ? ".text%" : "", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (FILE, ".section\t\"%s\",#alloc\n", (NAME)); \
+ else \
+ fprintf (FILE, ".section\t\"%s\",#alloc,#write\n", (NAME)); \
+} while (0)
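+
+/* With -ffunction-sections the function's section name is prefixed with
+ ".text%", giving e.g. .section ".text%foo",#alloc,#execinstr. */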
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#undef FUNCTION_BLOCK_PROFILER
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ do { \
+ fprintf (FILE, "\tsethi %%hi(.LLPBX0),%%o0\n\tld [%%lo(.LLPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne LPY%d\n\tadd %%o0,%%lo(.LLPBX0),%%o0\n\tcall __bb_init_func\n\tnop\nLPY%d:\n", \
+ (LABELNO), (LABELNO)); \
+ } while (0)
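+
+/* The emitted sequence loads the word at .LLPBX0 and calls __bb_init_func,
+ with the address of .LLPBX0 in %o0 (formed in the branch delay slot), only
+ if that word is still zero. */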
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#undef BLOCK_PROFILER
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+{ \
+ int blockn = (BLOCKNO); \
+ fprintf (FILE, "\tsethi %%hi(.LLPBX2+%d),%%g1\n\tld [%%lo(.LLPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(.LLPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+}
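+
+/* In effect the emitted code increments the 32-bit counter at
+ .LLPBX2 + 4*BLOCKNO. */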
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
diff --git a/contrib/gcc/config/sparc/t-elf b/contrib/gcc/config/sparc/t-elf
new file mode 100644
index 0000000..da9df38
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-elf
@@ -0,0 +1,39 @@
+# we need to supply our own assembly versions of libgcc1.c files,
+# since the user may not have native 'cc' available
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# crt0 is built elsewhere
+LIBGCC1_TEST =
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
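+# fp-bit.c is built with FLOAT defined to get the single-precision routines;
+# dp-bit.c is the same source built for double precision.
+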
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
+# gas build...
+#MULTILIB_OPTIONS = msoft-float mcpu=v8
+MULTILIB_OPTIONS = msoft-float
+#MULTILIB_DIRNAMES = soft v8
+MULTILIB_DIRNAMES = soft
+#MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
+MULTILIB_MATCHES = msoft-float=mno-fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Assemble startup files.
+crti.o: $(srcdir)/config/sparc/sol2-ci.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crti.o -x assembler $(srcdir)/config/sparc/sol2-ci.asm
+crtn.o: $(srcdir)/config/sparc/sol2-cn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtn.o -x assembler $(srcdir)/config/sparc/sol2-cn.asm
diff --git a/contrib/gcc/config/sparc/t-sol2 b/contrib/gcc/config/sparc/t-sol2
new file mode 100644
index 0000000..d41254a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sol2
@@ -0,0 +1,30 @@
+# we need to supply our own assembly versions of libgcc1.c files,
+# since the user may not have native 'cc' available
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
+
+# gmon build rule:
+gmon.o: $(srcdir)/config/sparc/gmon-sol2.c $(GCC_PASSES) $(CONFIG_H) stmp-int-hdrs
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) \
+ -c $(srcdir)/config/sparc/gmon-sol2.c -o gmon.o
+
+# Assemble startup files.
+crt1.o: $(srcdir)/config/sparc/sol2-c1.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crt1.o -x assembler $(srcdir)/config/sparc/sol2-c1.asm
+crti.o: $(srcdir)/config/sparc/sol2-ci.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crti.o -x assembler $(srcdir)/config/sparc/sol2-ci.asm
+crtn.o: $(srcdir)/config/sparc/sol2-cn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtn.o -x assembler $(srcdir)/config/sparc/sol2-cn.asm
+gcrt1.o: $(srcdir)/config/sparc/sol2-g1.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o gcrt1.o -x assembler $(srcdir)/config/sparc/sol2-g1.asm
+
+# We need to use -fPIC when we are using gcc to compile the routines in
+# crtstuff.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fPIC when compiling the
+# routines in crtstuff.c.
+
+CRTSTUFF_T_CFLAGS = -fPIC
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/sparc/t-sp64 b/contrib/gcc/config/sparc/t-sp64
new file mode 100644
index 0000000..99acd5d
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sp64
@@ -0,0 +1,2 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
diff --git a/contrib/gcc/config/sparc/t-sparcbare b/contrib/gcc/config/sparc/t-sparcbare
new file mode 100644
index 0000000..8bd978b
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sparcbare
@@ -0,0 +1,26 @@
+# configuration file for a bare sparc cpu
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
+# gas build...
+MULTILIB_OPTIONS = msoft-float mcpu=v8
+MULTILIB_DIRNAMES = soft v8
+MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-sparclite b/contrib/gcc/config/sparc/t-sparclite
new file mode 100644
index 0000000..7cdfbb0
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sparclite
@@ -0,0 +1,24 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spl.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+MULTILIB_OPTIONS = mfpu mflat
+MULTILIB_DIRNAMES =
+MULTILIB_MATCHES = mfpu=mhard-float mfpu=mcpu?f934
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-splet b/contrib/gcc/config/sparc/t-splet
new file mode 100644
index 0000000..3409f5d
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-splet
@@ -0,0 +1,23 @@
+# configuration file for a bare sparclet cpu, aout format files
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian mlive-g0 mbroken-saverestore
+MULTILIB_DIRNAMES = little live-g0 brknsave
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-sunos40 b/contrib/gcc/config/sparc/t-sunos40
new file mode 100644
index 0000000..3e10575
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sunos40
@@ -0,0 +1,7 @@
+# SunOS 4.0.*
+# /bin/as doesn't recognize the v8 instructions, so we can't do a v8
+# multilib build.
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
diff --git a/contrib/gcc/config/sparc/t-sunos41 b/contrib/gcc/config/sparc/t-sunos41
new file mode 100644
index 0000000..5783d6a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sunos41
@@ -0,0 +1,16 @@
+# SunOS 4.1.*
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
+
+MULTILIB_OPTIONS = fpic/fPIC mcpu=v8
+MULTILIB_DIRNAMES = pic ucpic v8
+MULTILIB_MATCHES = mcpu?v8=mv8
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# The native linker doesn't handle linking -fpic code with -fPIC code. Ugh.
+# We cope by building both variants of libgcc.
+#TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/sparc/t-vxsparc b/contrib/gcc/config/sparc/t-vxsparc
new file mode 100644
index 0000000..0c7a14a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-vxsparc
@@ -0,0 +1,17 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
+
+# We don't want to build .umul, etc., because VxWorks provides them,
+# which means that libgcc1-test will fail.
+LIBGCC1_TEST =
+
+# We don't want to put exit in libgcc.a for VxWorks, because VxWorks
+# does not have _exit.
+TARGET_LIBGCC2_CFLAGS = -Dexit=unused_exit
+
+MULTILIB_OPTIONS=msoft-float mv8
+MULTILIB_DIRNAMES=soft v8
+MULTILIB_MATCHES=msoft-float=mno-fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/vxsim.h b/contrib/gcc/config/sparc/vxsim.h
new file mode 100644
index 0000000..6c80375
--- /dev/null
+++ b/contrib/gcc/config/sparc/vxsim.h
@@ -0,0 +1,131 @@
+/* Definitions of target machine for GNU compiler, for SPARC VxSim
+ Copyright 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Supposedly the same as vanilla sparc svr4, except for the stuff below: */
+#include "sparc/sysv4.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DCPU=SIMSPARCSOLARIS -D__vxworks -D__vxworks__ -Dsparc -D__svr4__ -D__SVR4 \
+ -Asystem(embedded) -Asystem(svr4) -Acpu(sparc) -Amachine(sparc)\
+ -D__GCC_NEW_VARARGS__"
+
+#undef CPP_SPEC
+#define CPP_SPEC ""
+
+#undef CC1_SPEC
+#define CC1_SPEC "-fno-builtin %{sun4:} %{target:}"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+ %{fpic:-K PIC} %{fPIC:-K PIC}"
+
+/* It appears that Solaris 2.0 uses the same reg numbering as
+ the old BSD-style system did. */
+
+#undef DBX_REGISTER_NUMBER
+/* Same as sparc.h */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* The Solaris 2 assembler uses .skip, not .zero, so put this back. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
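+/* The leading "*" tells assemble_name to output the rest of the string
+ verbatim, without adding the user label prefix. */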
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
+
+
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+#undef LINK_SPEC
+#define LINK_SPEC "-r"
+
+/* This defines which switch letters take arguments.
+ It is as in svr4.h but with -R added. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ( (CHAR) == 'D' \
+ || (CHAR) == 'U' \
+ || (CHAR) == 'o' \
+ || (CHAR) == 'e' \
+ || (CHAR) == 'u' \
+ || (CHAR) == 'I' \
+ || (CHAR) == 'm' \
+ || (CHAR) == 'L' \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'A' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z')
+
+/* ??? This does not work in SunOS 4.x, so it is not enabled in sparc.h.
+ Instead, it is enabled here, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 64
diff --git a/contrib/gcc/config/sparc/vxsparc.h b/contrib/gcc/config/sparc/vxsparc.h
new file mode 100644
index 0000000..18ce6ed
--- /dev/null
+++ b/contrib/gcc/config/sparc/vxsparc.h
@@ -0,0 +1,61 @@
+/* Definitions of target machine for GNU compiler. VxWorks SPARC version.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Contributed by David Henkel-Wallace (gumby@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* Specify what to link with. */
+/* VxWorks does all the library stuff itself. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* Provide required defaults for linker -e. */
+#undef LINK_SPEC
+#define LINK_SPEC "%{!nostdlib:%{!r*:%{!e*:-e start}}}"
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
+
+/* Note that we define CPU here even if the user has specified -ansi.
+ This violates user namespace, but the VxWorks headers, and potentially
+ user code, all explicitly rely upon the definition of CPU in order to get
+ the proper processor information. */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) -DCPU=SPARC"
+
+#undef PTRDIFF_TYPE
+#undef SIZE_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+
+#define PTRDIFF_TYPE "long int"
+#define SIZE_TYPE "unsigned int"
+#define WCHAR_TYPE "char"
+#define WCHAR_TYPE_SIZE 8
+
+/* US Software GOFAST library support. */
+#include "gofast.h"
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS INIT_GOFAST_OPTABS
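+
+/* For reference only (not part of the imported file): the comment above
+   notes that VxWorks headers and user code rely on the CPU macro that
+   CPP_SPEC predefines as -DCPU=SPARC, even under -ansi.  A hedged,
+   hypothetical illustration of that dependency (the value of SPARC here is
+   made up; real VxWorks headers supply their own cpu-family constants):
+
+   #include <stdio.h>
+
+   #ifndef CPU
+   #error "CPU must be predefined by the target's CPP_SPEC"
+   #endif
+
+   #define SPARC 2   // illustrative value only
+
+   int main (void)
+   {
+   #if CPU == SPARC
+     puts ("building for a SPARC VxWorks target");
+   #else
+     puts ("building for some other CPU");
+   #endif
+     return 0;
+   }
+
+   Compile with the driver so that -DCPU=SPARC is passed; without it the
+   #error fires, which is exactly the breakage the comment above is
+   guarding against.  */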
diff --git a/contrib/gcc/config/sparc/x-sysv4 b/contrib/gcc/config/sparc/x-sysv4
new file mode 100644
index 0000000..2a661e3
--- /dev/null
+++ b/contrib/gcc/config/sparc/x-sysv4
@@ -0,0 +1,2 @@
+X_CFLAGS=-DSVR4
+ALLOCA=alloca.o
diff --git a/contrib/gcc/config/sparc/xm-linux.h b/contrib/gcc/config/sparc/xm-linux.h
new file mode 100644
index 0000000..691c7d1
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-linux.h
@@ -0,0 +1,26 @@
+/* Configuration for GCC for SPARC running Linux-based GNU systems.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#ifndef inhibit_libc
+#include <alloca.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
diff --git a/contrib/gcc/config/sparc/xm-lynx.h b/contrib/gcc/config/sparc/xm-lynx.h
new file mode 100644
index 0000000..90fef85
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-lynx.h
@@ -0,0 +1,39 @@
+/* Configuration for GNU C-compiler for sparc platforms running LynxOS.
+ Copyright (C) 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-lynx.h>
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* Include <sys/wait.h> to define the exit status access macros. */
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+
+#include "tm.h"
diff --git a/contrib/gcc/config/sparc/xm-openbsd.h b/contrib/gcc/config/sparc/xm-openbsd.h
new file mode 100644
index 0000000..2df7fb3
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-openbsd.h
@@ -0,0 +1,23 @@
+/* Configuration file for a host running sparc OpenBSD.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-openbsd.h>
+#include <sparc/xm-sparc.h>
+
diff --git a/contrib/gcc/config/sparc/xm-pbd.h b/contrib/gcc/config/sparc/xm-pbd.h
new file mode 100644
index 0000000..1c3f475
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-pbd.h
@@ -0,0 +1,10 @@
+/* Host environment for the tti "Unicom" PBB 68020 boards */
+
+#include "sparc/xm-sparc.h"
+
+#define USG
+
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
+
diff --git a/contrib/gcc/config/sparc/xm-sol2.h b/contrib/gcc/config/sparc/xm-sol2.h
new file mode 100644
index 0000000..5613b08
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sol2.h
@@ -0,0 +1,4 @@
+/* If not compiled with GNU C, include the system's <alloca.h> header. */
+#ifndef __GNUC__
+#include <alloca.h>
+#endif
diff --git a/contrib/gcc/config/sparc/xm-sp64.h b/contrib/gcc/config/sparc/xm-sp64.h
new file mode 100644
index 0000000..5954aff
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sp64.h
@@ -0,0 +1,25 @@
+/* Configuration for GCC for Sparc v9 running 64-bit native.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/xm-sparc.h>
+
+/* This describes the machine the compiler is hosted on. */
+#undef HOST_BITS_PER_LONG
+#define HOST_BITS_PER_LONG 64
diff --git a/contrib/gcc/config/sparc/xm-sparc.h b/contrib/gcc/config/sparc/xm-sparc.h
new file mode 100644
index 0000000..e553a0d
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sparc.h
@@ -0,0 +1,49 @@
+/* Configuration for GNU C-compiler for Sun Sparc.
+ Copyright (C) 1988, 1993, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Doubles are stored in memory with the high order word first. This
+ matters when cross-compiling. */
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If compiled with Sun CC, the use of alloca requires this #include. */
+#ifndef __GNUC__
+#include "alloca.h"
+#endif
diff --git a/contrib/gcc/config/sparc/xm-sysv4.h b/contrib/gcc/config/sparc/xm-sysv4.h
new file mode 100644
index 0000000..6e663d1
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sysv4.h
@@ -0,0 +1,48 @@
+/* Configuration for GNU C-compiler for Sun Sparc running System V.4.
+ Copyright (C) 1992, 1993, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@netcom.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Doubles are stored in memory with the high order word first. This
+ matters when cross-compiling. */
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+#ifndef __GNUC__
+#define ONLY_INT_FIELDS
+#endif
diff --git a/contrib/gcc/config/svr3.h b/contrib/gcc/config/svr3.h
index fc6e262..3475561 100644
--- a/contrib/gcc/config/svr3.h
+++ b/contrib/gcc/config/svr3.h
@@ -1,8 +1,7 @@
-/* svr3.h -- operating system specific defines to be used when
- targeting GCC for some generic System V Release 3 system.
- Copyright (C) 1991 Free Software Foundation, Inc.
-
- Written by Ron Guilmette (rfg@netcom.com).
+/* Operating system specific defines to be used when targeting GCC for
+ generic System V Release 3 system.
+ Copyright (C) 1991, 1996 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
This file is part of GNU CC.
@@ -103,7 +102,11 @@ Boston, MA 02111-1307, USA.
#define STARTFILE_SPEC \
"%{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}}"
+#ifdef CROSS_COMPILE
+#define LIB_SPEC "-lc crtn.o%s"
+#else
#define LIB_SPEC "%{p:-L/usr/lib/libp}%{pg:-L/usr/lib/libp} -lc crtn.o%s"
+#endif
/* Special flags for the linker. I don't know what they do. */
@@ -156,14 +159,13 @@ Boston, MA 02111-1307, USA.
#undef ASM_BYTE_OP
#define ASM_BYTE_OP "\t.byte"
-/* This is how to output a reference to a user-level label named NAME.
- `assemble_name' uses this.
+/* The prefix to add to user-visible assembler symbols.
For System V Release 3 the convention is to prepend a leading
underscore onto user-level symbol names. */
-#undef ASM_OUTPUT_LABELREF
-#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "_%s", NAME)
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX "_"
/* This is how to output an internal numbered label where
PREFIX is the class of label and NUM is the number within the class.
@@ -247,29 +249,17 @@ do { \
#endif /* STACK_GROWS_DOWNWARD */
-/* Add extra sections .init and .fini, in addition to .bss from att386.h. */
+/* Add extra sections .rodata, .init and .fini. */
#undef EXTRA_SECTIONS
-#define EXTRA_SECTIONS in_const, in_bss, in_init, in_fini
+#define EXTRA_SECTIONS in_const, in_init, in_fini
#undef EXTRA_SECTION_FUNCTIONS
#define EXTRA_SECTION_FUNCTIONS \
CONST_SECTION_FUNCTION \
- BSS_SECTION_FUNCTION \
INIT_SECTION_FUNCTION \
FINI_SECTION_FUNCTION
-#define BSS_SECTION_FUNCTION \
-void \
-bss_section () \
-{ \
- if (in_section != in_bss) \
- { \
- fprintf (asm_out_file, "\t%s\n", BSS_SECTION_ASM_OP); \
- in_section = in_bss; \
- } \
-}
-
#define INIT_SECTION_FUNCTION \
void \
init_section () \
diff --git a/contrib/gcc/config/svr4.h b/contrib/gcc/config/svr4.h
index 41c6ffa..4737697 100644
--- a/contrib/gcc/config/svr4.h
+++ b/contrib/gcc/config/svr4.h
@@ -1,7 +1,7 @@
/* Operating system specific defines to be used when targeting GCC for some
generic System V Release 4 system.
- Copyright (C) 1991, 1994, 1995 Free Software Foundation, Inc.
- Contributed by Ron Guilmette (rfg@segfault.us.com).
+ Copyright (C) 1991, 94-97, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
This file is part of GNU CC.
@@ -53,16 +53,9 @@ Boston, MA 02111-1307, USA.
thing as a -T option for svr4. */
#define SWITCH_TAKES_ARG(CHAR) \
- ( (CHAR) == 'D' \
- || (CHAR) == 'U' \
- || (CHAR) == 'o' \
- || (CHAR) == 'e' \
- || (CHAR) == 'u' \
- || (CHAR) == 'I' \
- || (CHAR) == 'm' \
- || (CHAR) == 'L' \
- || (CHAR) == 'A' \
+ (DEFAULT_SWITCH_TAKES_ARG (CHAR) \
|| (CHAR) == 'h' \
+ || (CHAR) == 'x' \
|| (CHAR) == 'z')
/* This defines which multi-letter switches take arguments. On svr4,
@@ -82,7 +75,7 @@ Boston, MA 02111-1307, USA.
/* Provide an ASM_SPEC appropriate for svr4. Here we try to support as
many of the specialized svr4 assembler options as seems reasonable,
given that there are certain options which we can't (or shouldn't)
- support directly due to the fact that they conflict with other options
+ support directly due to the fact that they conflict with other options
for other svr4 tools (e.g. ld) or with other options for GCC itself.
For example, we don't support the -o (output file) or -R (remove
input file) options because GCC already handles these things. We
@@ -97,7 +90,7 @@ Boston, MA 02111-1307, USA.
#undef ASM_SPEC
#define ASM_SPEC \
- "%{V} %{v:%{!V:-V}} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
+ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*}"
/* svr4 assemblers need the `-' (indicating input from stdin) to come after
the -o option (and its argument) for some reason. If we try to put it
@@ -108,19 +101,23 @@ Boston, MA 02111-1307, USA.
messages. */
#undef ASM_FINAL_SPEC
-#define ASM_FINAL_SPEC "%{pipe:-}"
+#define ASM_FINAL_SPEC "%|"
/* Under svr4, the normal location of the `ld' and `as' programs is the
/usr/ccs/bin directory. */
+#ifndef CROSS_COMPILE
#undef MD_EXEC_PREFIX
#define MD_EXEC_PREFIX "/usr/ccs/bin/"
+#endif
/* Under svr4, the normal location of the various *crt*.o files is the
/usr/ccs/lib directory. */
+#ifndef CROSS_COMPILE
#undef MD_STARTFILE_PREFIX
#define MD_STARTFILE_PREFIX "/usr/ccs/lib/"
+#endif
/* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default
standard C library (unless we are building a shared library). */
@@ -128,12 +125,6 @@ Boston, MA 02111-1307, USA.
#undef LIB_SPEC
#define LIB_SPEC "%{!shared:%{!symbolic:-lc}}"
-/* Provide a LIBGCC_SPEC appropriate for svr4. We also want to exclude
- libgcc when -symbolic. */
-
-#undef LIBGCC_SPEC
-#define LIBGCC_SPEC "%{!shared:%{!symbolic:-lgcc}}"
-
/* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own
magical crtend.o file (see crtstuff.c) which provides part of the
support for getting C++ file-scope static object constructed before
@@ -141,7 +132,7 @@ Boston, MA 02111-1307, USA.
which is either `gcrtn.o' or `crtn.o'. */
#undef ENDFILE_SPEC
-#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o}%{!pg:crtn.o%s}"
+#define ENDFILE_SPEC "crtend.o%s %{pg:gcrtn.o%s}%{!pg:crtn.o%s}"
/* Provide a LINK_SPEC appropriate for svr4. Here we provide support
for the special GCC options -static, -shared, and -symbolic which
@@ -150,7 +141,7 @@ Boston, MA 02111-1307, USA.
support here for as many of the other svr4 linker options as seems
reasonable, given that some of them conflict with options for other
svr4 tools (e.g. the assembler). In particular, we do support the
- -h*, -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*,
+ -z*, -V, -b, -t, -Qy, -Qn, and -YP* options here, and the -e*,
-l*, -o*, -r, -s, -u*, and -L* options are directly supported
by gcc.c itself. We don't directly support the -m (generate load
map) option because that conflicts with the -m (run m4) option of
@@ -167,16 +158,27 @@ Boston, MA 02111-1307, USA.
not being done. */
#undef LINK_SPEC
-#define LINK_SPEC "%{h*} %{V} %{v:%{!V:-V}} \
+#ifdef CROSS_COMPILE
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{b} %{Wl,*:%*} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{Qy:} %{!Qn:-Qy}"
+#else
+#define LINK_SPEC "%{h*} %{v:-V} \
%{b} %{Wl,*:%*} \
%{static:-dn -Bstatic} \
- %{shared:-G -dy -z text %{!h*:%{o*:-h %*}}} \
- %{symbolic:-Bsymbolic -G -dy -z text %{!h*:%{o*:-h %*}}} \
+ %{shared:-G -dy -z text} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
%{G:-G} \
%{YP,*} \
%{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
%{!p:-Y P,/usr/ccs/lib:/usr/lib}} \
%{Qy:} %{!Qn:-Qy}"
+#endif
/* Gcc automatically adds in one of the files /usr/ccs/lib/values-Xc.o,
/usr/ccs/lib/values-Xa.o, or /usr/ccs/lib/values-Xt.o for each final
@@ -243,6 +245,10 @@ do { \
#define DWARF_DEBUGGING_INFO
+/* All ELF targets can support DWARF-2. */
+
+#define DWARF2_DEBUGGING_INFO
+
/* The numbers used to denote specific machine registers in the System V
Release 4 DWARF debugging information are quite likely to be totally
different from the numbers used in BSD stabs debugging information
@@ -259,6 +265,10 @@ do { \
#define DBX_DEBUGGING_INFO
+/* When generating stabs debugging, use N_BINCL entries. */
+
+#define DBX_USE_BINCL
+
/* Use DWARF debugging info by default. */
#ifndef PREFERRED_DEBUGGING_TYPE
@@ -320,8 +330,13 @@ while (0)
embedded stabs. */
#define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME) \
- fprintf (FILE, \
- "\t.text\n\t.stabs \"\",%d,0,0,.Letext\n.Letext:\n", N_SO)
+do \
+ { \
+ text_section (); \
+ fprintf (FILE, \
+ "\t.stabs \"\",%d,0,0,.Letext\n.Letext:\n", N_SO); \
+ } \
+while (0)
/* Define the actual types of some ANSI-mandated types. (These
definitions should work for most SVR4 systems). */
@@ -368,14 +383,13 @@ while (0)
#define ASM_OUTPUT_SKIP(FILE,SIZE) \
fprintf (FILE, "\t%s\t%u\n", SKIP_ASM_OP, (SIZE))
-/* This is how to output a reference to a user-level label named NAME.
- `assemble_name' uses this.
+/* The prefix to add to user-visible assembler symbols.
For System V Release 4 the convention is *not* to prepend a leading
underscore onto user-level symbol names. */
-#undef ASM_OUTPUT_LABELREF
-#define ASM_OUTPUT_LABELREF(FILE,NAME) fprintf (FILE, "%s", NAME)
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
/* This is how to output an internal numbered label where
PREFIX is the class of label and NUM is the number within the class.
@@ -400,7 +414,7 @@ do { \
#undef ASM_GENERATE_INTERNAL_LABEL
#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \
do { \
- sprintf (LABEL, "*.%s%d", PREFIX, NUM); \
+ sprintf (LABEL, "*.%s%d", PREFIX, (unsigned) (NUM)); \
} while (0)
/* Output the label which precedes a jumptable. Note that for all svr4
@@ -463,6 +477,13 @@ do { \
ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
} while (0)
+/* Biggest alignment supported by the object file format of this
+ machine. Use this macro to limit the alignment which can be
+ specified using the `__attribute__ ((aligned (N)))' construct. If
+ not defined, the default value is `BIGGEST_ALIGNMENT'. */
+
+#define MAX_OFILE_ALIGNMENT (32768*8)
+
/* This is the pseudo-op used to generate a 32-bit word of data with a
specific value in some section. This is the same for all known svr4
assemblers. */
@@ -573,15 +594,81 @@ dtors_section () \
}
/* Switch into a generic section.
- This is currently only used to support section attributes.
-
+
We make the section read-only and executable for a function decl,
- read-only for a const data decl, and writable for a non-const data decl. */
-#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME) \
- fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, \
- (DECL) && TREE_CODE (DECL) == FUNCTION_DECL ? "ax" : \
- (DECL) && TREE_READONLY (DECL) ? "a" : "aw")
+ read-only for a const data decl, and writable for a non-const data decl.
+
+ If the section has already been defined, we must not
+ emit the attributes here. The SVR4 assembler does not
+ recognize section redefinitions.
+ If DECL is NULL, no attributes are emitted. */
+
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+do { \
+ static struct section_info \
+ { \
+ struct section_info *next; \
+ char *name; \
+ enum sect_enum {SECT_RW, SECT_RO, SECT_EXEC} type; \
+ } *sections; \
+ struct section_info *s; \
+ char *mode; \
+ enum sect_enum type; \
+ \
+ for (s = sections; s; s = s->next) \
+ if (!strcmp (NAME, s->name)) \
+ break; \
+ \
+ if (DECL && TREE_CODE (DECL) == FUNCTION_DECL) \
+ type = SECT_EXEC, mode = "ax"; \
+ else if (DECL && DECL_READONLY_SECTION (DECL, RELOC)) \
+ type = SECT_RO, mode = "a"; \
+ else \
+ type = SECT_RW, mode = "aw"; \
+ \
+ if (s == 0) \
+ { \
+ s = (struct section_info *) xmalloc (sizeof (struct section_info)); \
+ s->name = xmalloc ((strlen (NAME) + 1) * sizeof (*NAME)); \
+ strcpy (s->name, NAME); \
+ s->type = type; \
+ s->next = sections; \
+ sections = s; \
+ fprintf (FILE, ".section\t%s,\"%s\",@progbits\n", NAME, mode); \
+ } \
+ else \
+ { \
+ if (DECL && s->type != type) \
+ error_with_decl (DECL, "%s causes a section type conflict"); \
+ \
+ fprintf (FILE, ".section\t%s\n", NAME); \
+ } \
+} while (0)
+#define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+#define UNIQUE_SECTION_P(DECL) (DECL_ONE_ONLY (DECL))
+#define UNIQUE_SECTION(DECL,RELOC) \
+do { \
+ int len; \
+ char *name, *string, *prefix; \
+ \
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (DECL)); \
+ \
+ if (! DECL_ONE_ONLY (DECL)) \
+ prefix = "."; \
+ else if (TREE_CODE (DECL) == FUNCTION_DECL) \
+ prefix = ".gnu.linkonce.t."; \
+ else if (DECL_READONLY_SECTION (DECL, RELOC)) \
+ prefix = ".gnu.linkonce.r."; \
+ else \
+ prefix = ".gnu.linkonce.d."; \
+ \
+ len = strlen (name) + strlen (prefix); \
+ string = alloca (len + 1); \
+ sprintf (string, "%s%s", prefix, name); \
+ \
+ DECL_SECTION_NAME (DECL) = build_string (len, string); \
+} while (0)
/* A C statement (sans semicolon) to output an element in the table of
global constructors. */
@@ -610,7 +697,9 @@ dtors_section () \
#define SELECT_SECTION(DECL,RELOC) \
{ \
- if (TREE_CODE (DECL) == STRING_CST) \
+ if (flag_pic && RELOC) \
+ data_section (); \
+ else if (TREE_CODE (DECL) == STRING_CST) \
{ \
if (! flag_writable_strings) \
const_section (); \
@@ -619,11 +708,7 @@ dtors_section () \
} \
else if (TREE_CODE (DECL) == VAR_DECL) \
{ \
- if ((flag_pic && RELOC) \
- || !TREE_READONLY (DECL) || TREE_SIDE_EFFECTS (DECL) \
- || !DECL_INITIAL (DECL) \
- || (DECL_INITIAL (DECL) != error_mark_node \
- && !TREE_CONSTANT (DECL_INITIAL (DECL)))) \
+ if (! DECL_READONLY_SECTION (DECL, RELOC)) \
data_section (); \
else \
const_section (); \
@@ -707,7 +792,10 @@ dtors_section () \
size_directive_output = 1; \
fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
assemble_name (FILE, NAME); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
} \
ASM_OUTPUT_LABEL(FILE, NAME); \
} while (0)
@@ -729,7 +817,10 @@ do { \
size_directive_output = 1; \
fprintf (FILE, "\t%s\t ", SIZE_ASM_OP); \
assemble_name (FILE, name); \
- fprintf (FILE, ",%d\n", int_size_in_bytes (TREE_TYPE (DECL))); \
+ putc (',', FILE); \
+ fprintf (FILE, HOST_WIDE_INT_PRINT_DEC, \
+ int_size_in_bytes (TREE_TYPE (DECL))); \
+ fputc ('\n', FILE); \
} \
} while (0)
@@ -807,7 +898,7 @@ do { \
register unsigned char *_limited_str = (unsigned char *) (STR); \
register unsigned ch; \
fprintf ((FILE), "\t%s\t\"", STRING_ASM_OP); \
- for (; ch = *_limited_str; _limited_str++) \
+ for (; (ch = *_limited_str); _limited_str++) \
{ \
register int escape; \
switch (escape = ESCAPES[ch]) \
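/* For reference only (not part of the imported file): the UNIQUE_SECTION
   macro added to svr4.h above builds per-symbol section names with
   .gnu.linkonce prefixes.  A standalone sketch of that naming scheme in
   plain C, with an illustrative enum and helper standing in for GCC's
   DECL tests (FUNCTION_DECL, read-only, DECL_ONE_ONLY, etc.):

   #include <stdio.h>
   #include <stdlib.h>
   #include <string.h>

   enum decl_kind { DECL_FUNCTION, DECL_RODATA, DECL_DATA };

   static char *
   linkonce_section_name (const char *name, enum decl_kind kind, int one_only)
   {
     const char *prefix;
     char *string;

     if (!one_only)
       prefix = ".";                    // ordinary unique section: ".name"
     else if (kind == DECL_FUNCTION)
       prefix = ".gnu.linkonce.t.";     // text
     else if (kind == DECL_RODATA)
       prefix = ".gnu.linkonce.r.";     // read-only data
     else
       prefix = ".gnu.linkonce.d.";     // writable data

     string = malloc (strlen (prefix) + strlen (name) + 1);
     sprintf (string, "%s%s", prefix, name);
     return string;
   }

   int main (void)
   {
     char *s = linkonce_section_name ("foo", DECL_FUNCTION, 1);
     puts (s);                          // prints ".gnu.linkonce.t.foo"
     free (s);
     return 0;
   }

   The linker then keeps only one copy of each .gnu.linkonce.* section,
   which is how the one-only (weak) decls marked by MAKE_DECL_ONE_ONLY
   above are deduplicated.  */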
diff --git a/contrib/gcc/config/t-freebsd b/contrib/gcc/config/t-freebsd
new file mode 100644
index 0000000..5164669
--- /dev/null
+++ b/contrib/gcc/config/t-freebsd
@@ -0,0 +1,5 @@
+# Don't run fixproto
+STMP_FIXPROTO =
+# Use only native include files
+USER_H = $(EXTRA_HEADERS) $(LANG_EXTRA_HEADERS)
+
diff --git a/contrib/gcc/config/t-gnu b/contrib/gcc/config/t-gnu
new file mode 100644
index 0000000..58969f2
--- /dev/null
+++ b/contrib/gcc/config/t-gnu
@@ -0,0 +1,13 @@
+LIBGCC1=libgcc1.null
+CROSS_LIBGCC1=libgcc1.null
+
+# The pushl in CTOR initialization interferes with frame pointer elimination.
+
+# We need to use -fPIC when we are using gcc to compile the routines in
+# crtstuff.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fPIC when compiling the
+# routines in crtstuff.c.
+
+CRTSTUFF_T_CFLAGS = -fPIC -fno-omit-frame-pointer
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/t-libc-ok b/contrib/gcc/config/t-libc-ok
index 712a4ca..43e4f5e 100644
--- a/contrib/gcc/config/t-libc-ok
+++ b/contrib/gcc/config/t-libc-ok
@@ -1,2 +1,3 @@
LIBGCC1=libgcc1.null
CROSS_LIBGCC1=libgcc1.null
+CRTSTUFF_T_FLAGS_S=-fPIC
diff --git a/contrib/gcc/config/t-linux-aout b/contrib/gcc/config/t-linux-aout
new file mode 100644
index 0000000..8826cdd
--- /dev/null
+++ b/contrib/gcc/config/t-linux-aout
@@ -0,0 +1,11 @@
+# Don't run fixproto
+STMP_FIXPROTO =
+
+# Don't install "assert.h" in gcc. We use the one in glibc.
+INSTALL_ASSERT_H =
+
+# Do not build libgcc1. Let gcc generate those functions. The GNU/Linux
+# C library can handle them.
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
diff --git a/contrib/gcc/config/t-netbsd b/contrib/gcc/config/t-netbsd
new file mode 100644
index 0000000..85d6057
--- /dev/null
+++ b/contrib/gcc/config/t-netbsd
@@ -0,0 +1,9 @@
+LIBGCC1=libgcc1.null
+CROSS_LIBGCC1=libgcc1.null
+LIBGCC1_TEST=
+
+# Don't run fixproto
+STMP_FIXPROTO =
+
+# Don't install "assert.h" in gcc. We use the one in glibc.
+INSTALL_ASSERT_H =
diff --git a/contrib/gcc/config/t-openbsd b/contrib/gcc/config/t-openbsd
new file mode 100644
index 0000000..6bd8123
--- /dev/null
+++ b/contrib/gcc/config/t-openbsd
@@ -0,0 +1,9 @@
+# Don't run fixproto
+STMP_FIXPROTO =
+
+# We don't need GCC's own include files but we do need lang specific ones.
+USER_H = ${LANG_EXTRA_HEADERS}
+INSTALL_ASSERT_H =
+
+# We don't want collisions with our mkstemps
+T_CFLAGS=-Dmkstemps=my_mkstemps
diff --git a/contrib/gcc/config/t-openbsd-thread b/contrib/gcc/config/t-openbsd-thread
new file mode 100644
index 0000000..4b25f25
--- /dev/null
+++ b/contrib/gcc/config/t-openbsd-thread
@@ -0,0 +1,5 @@
+# This is currently needed to compile libgcc2 for threads support
+TARGET_LIBGCC2_CFLAGS=-pthread
+#T_CFLAGS=-pthread
+#T_CPPFLAGS=-pthread
+
diff --git a/contrib/gcc/config/t-rtems b/contrib/gcc/config/t-rtems
new file mode 100644
index 0000000..aa0ca66
--- /dev/null
+++ b/contrib/gcc/config/t-rtems
@@ -0,0 +1,6 @@
+# RTEMS uses newlib which does not require prototype fixing
+STMP_FIXPROTO =
+
+# Don't install "assert.h" in gcc. RTEMS uses the one in newlib.
+INSTALL_ASSERT_H =
+
diff --git a/contrib/gcc/config/t-svr4 b/contrib/gcc/config/t-svr4
index d4abf48..e6be0c3 100644
--- a/contrib/gcc/config/t-svr4
+++ b/contrib/gcc/config/t-svr4
@@ -2,6 +2,7 @@
# crtstuff.c. This is only really needed when we are going to use gcc/g++
# to produce a shared library, but since we don't know ahead of time when
# we will be doing that, we just always use -fPIC when compiling the
-# routines in crtstuff.c.
+# routines in crtstuff.c. Likewise for libgcc2.c.
CRTSTUFF_T_CFLAGS = -fPIC
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/x-linux b/contrib/gcc/config/x-linux
index ea2a5f2..a7c0917 100644
--- a/contrib/gcc/config/x-linux
+++ b/contrib/gcc/config/x-linux
@@ -1,14 +1,5 @@
-# It is defined in config/xm-linux.h.
-# X_CFLAGS = -DPOSIX
-
-# The following is needed when compiling stages 2 and 3 because gcc's
-# limits.h must be picked up before /usr/include/limits.h. This is because
-# each does an #include_next of the other if the other hasn't been included.
-# /usr/include/limits.h loses if it gets found first because /usr/include is
-# at the end of the search order. When a new version of gcc is released,
-# gcc's limits.h hasn't been installed yet and hence isn't found.
-
-BOOT_CFLAGS = -O $(CFLAGS) -Iinclude
-
# Don't run fixproto
STMP_FIXPROTO =
+
+# Don't install "assert.h" in gcc. We use the one in glibc.
+INSTALL_ASSERT_H =
diff --git a/contrib/gcc/config/x-linux-aout b/contrib/gcc/config/x-linux-aout
new file mode 100644
index 0000000..36ae68a
--- /dev/null
+++ b/contrib/gcc/config/x-linux-aout
@@ -0,0 +1,14 @@
+# It is defined in config/xm-linux.h.
+# X_CFLAGS = -DPOSIX
+
+# The following is needed when compiling stages 2 and 3 because gcc's
+# limits.h must be picked up before /usr/include/limits.h. This is because
+# each does an #include_next of the other if the other hasn't been included.
+# /usr/include/limits.h loses if it gets found first because /usr/include is
+# at the end of the search order. When a new version of gcc is released,
+# gcc's limits.h hasn't been installed yet and hence isn't found.
+
+# BOOT_CFLAGS = -O $(CFLAGS) -Iinclude
+
+# Don't run fixproto
+# STMP_FIXPROTO =
diff --git a/contrib/gcc/config/xm-alloca.h b/contrib/gcc/config/xm-alloca.h
new file mode 100644
index 0000000..3dbdc37
--- /dev/null
+++ b/contrib/gcc/config/xm-alloca.h
@@ -0,0 +1,4 @@
+/* If not compiled with GNU C, use the portable alloca. */
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
diff --git a/contrib/gcc/config/xm-freebsd.h b/contrib/gcc/config/xm-freebsd.h
index f73c9aa..b71ff56 100644
--- a/contrib/gcc/config/xm-freebsd.h
+++ b/contrib/gcc/config/xm-freebsd.h
@@ -22,12 +22,3 @@ Boston, MA 02111-1307, USA. */
running FreeBSD. This file should not be specified as $xm_file itself;
instead $xm_file should be CPU/xm-freebsd.h, which should include both
CPU/xm-CPU.h and this file xm-freebsd.h. */
-
-/* FreeBSD has strerror. */
-#define HAVE_STRERROR
-
-/* We have _sys_siglist, but the declaration in <signal.h> conflicts with
- the declarations in collect2.c and mips-tfile.c, so disable the declarations
- in those files. */
-
-#define DONT_DECLARE_SYS_SIGLIST
diff --git a/contrib/gcc/config/xm-gnu.h b/contrib/gcc/config/xm-gnu.h
index 57e74f2..64e8e2f 100644
--- a/contrib/gcc/config/xm-gnu.h
+++ b/contrib/gcc/config/xm-gnu.h
@@ -1,5 +1,5 @@
/* Configuration for GNU C-compiler for hosts running GNU.
- Copyright (C) 1994, 1995 Free Software Foundation, Inc.
+ Copyright (C) 1994, 1995, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -23,9 +23,10 @@ Boston, MA 02111-1307, USA. */
instead $xm_file should be CPU/xm-gnu.h, which should include both
CPU/xm-CPU.h and this file xm-gnu.h. */
-#define HAVE_STRERROR /* GNU has strerror. */
#define POSIX /* GNU complies to POSIX.1. */
+#ifndef inhibit_libc
/* Get a definition of O_RDONLY; some of the GCC files don't include this
properly and will define it themselves to be zero. */
#include <fcntl.h>
+#endif
diff --git a/contrib/gcc/config/xm-linux.h b/contrib/gcc/config/xm-linux.h
index 5ffb0f2..2cffdb7 100644
--- a/contrib/gcc/config/xm-linux.h
+++ b/contrib/gcc/config/xm-linux.h
@@ -1,5 +1,5 @@
-/* Configuration for GCC for Intel i386 running Linux.
- Copyright (C) 1995 Free Software Foundation, Inc.
+/* Configuration for GCC for Intel i386 running Linux-based GNU systems.
+ Copyright (C) 1995, 1996, 1997 Free Software Foundation, Inc.
Contributed by H.J. Lu (hjl@nynexst.com)
This file is part of GNU CC.
@@ -19,22 +19,15 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#define HAVE_VPRINTF
-
-#define HAVE_STRERROR
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+#undef POSIX
#define POSIX
-#define DONT_DECLARE_SYS_SIGLIST
-
/* We do have one, but I'd like to use the one come with gcc since
we have been doing that for a long time with USG defined. H.J. */
-#define NO_STAB_H
+#undef HAVE_STAB_H
#undef BSTRING
#define BSTRING
-#undef bcmp
-#undef bcopy
-#undef bzero
-#undef index
-#undef rindex
diff --git a/contrib/gcc/config/xm-netbsd.h b/contrib/gcc/config/xm-netbsd.h
index 00000ce..099a923 100644
--- a/contrib/gcc/config/xm-netbsd.h
+++ b/contrib/gcc/config/xm-netbsd.h
@@ -23,5 +23,4 @@ Boston, MA 02111-1307, USA. */
instead $xm_file should be CPU/xm-netbsd.h, which should include both
CPU/xm-CPU.h and this file xm-netbsd.h. */
-#define HAVE_STRERROR
#define HAVE_VPRINTF
diff --git a/contrib/gcc/config/xm-openbsd.h b/contrib/gcc/config/xm-openbsd.h
new file mode 100644
index 0000000..74a8421
--- /dev/null
+++ b/contrib/gcc/config/xm-openbsd.h
@@ -0,0 +1,35 @@
+/* Configuration fragment for hosts running a version of OpenBSD.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+/* This file gets included by all architectures. It holds stuff
+ that ought to be defined when hosting a compiler on an OpenBSD
+ machine, independently of the architecture. It's included by
+ ${cpu_type}/xm-openbsd.h, not included directly. */
+
+/* OpenBSD is trying to be POSIX-compliant, to the point of fixing
+ problems that may occur with gcc's interpretation. */
+#undef POSIX
+#define POSIX
+
+/* Ensure we get gnu C's defaults. */
+#ifdef __GNUC__
+#define alloca __builtin_alloca
+#endif
+
+
diff --git a/contrib/gcc/config/xm-siglist.h b/contrib/gcc/config/xm-siglist.h
new file mode 100644
index 0000000..d6133d6
--- /dev/null
+++ b/contrib/gcc/config/xm-siglist.h
@@ -0,0 +1,6 @@
+/* Some systems provide no sys_siglist, but do offer the same data under
+ another name. */
+
+#define sys_siglist _sys_siglist
+#undef SYS_SIGLIST_DECLARED
+#define SYS_SIGLIST_DECLARED
diff --git a/contrib/gcc/config/xm-std32.h b/contrib/gcc/config/xm-std32.h
new file mode 100644
index 0000000..c52782e
--- /dev/null
+++ b/contrib/gcc/config/xm-std32.h
@@ -0,0 +1,34 @@
+/* Configuration for GNU C-compiler for standard 32-bit host machine.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
diff --git a/contrib/gcc/config/xm-svr3.h b/contrib/gcc/config/xm-svr3.h
index ac1000f..6f25250 100644
--- a/contrib/gcc/config/xm-svr3.h
+++ b/contrib/gcc/config/xm-svr3.h
@@ -1,5 +1,5 @@
/* Configuration for GNU C-compiler for hosts running System V Release 3
- Copyright (C) 1991, 1993 Free Software Foundation, Inc.
+ Copyright (C) 1991, 1993, 1996 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -18,15 +18,7 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#define bcopy(src,dst,len) memcpy ((dst),(src),(len))
-#define bzero(dst,len) memset ((dst),0,(len))
-#define bcmp(left,right,len) memcmp ((left),(right),(len))
-
-#define rindex strrchr
-#define index strchr
-
#define USG
-#define HAVE_VPRINTF
#ifndef SVR3
#define SVR3
diff --git a/contrib/gcc/config/xm-svr4.h b/contrib/gcc/config/xm-svr4.h
index 3008432..8534aaa 100644
--- a/contrib/gcc/config/xm-svr4.h
+++ b/contrib/gcc/config/xm-svr4.h
@@ -1,5 +1,5 @@
/* Configuration for GNU C-compiler for hosts running System V Release 4
- Copyright (C) 1988 Free Software Foundation, Inc.
+ Copyright (C) 1988, 1997 Free Software Foundation, Inc.
This file is part of GNU CC.
@@ -18,18 +18,12 @@ along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
-#define bcopy(src,dst,len) memcpy ((dst),(src),(len))
-#define bzero(dst,len) memset ((dst),0,(len))
-#define bcmp(left,right,len) memcmp ((left),(right),(len))
-
-#define rindex strrchr
-#define index strchr
-
#define USG
-#define HAVE_VPRINTF
#define POSIX
/* SVR4 provides no sys_siglist,
but does offer the same data under another name. */
#define sys_siglist _sys_siglist
+#undef SYS_SIGLIST_DECLARED
+#define SYS_SIGLIST_DECLARED