author:    obrien <obrien@FreeBSD.org>  1999-08-26 09:30:50 +0000
committer: obrien <obrien@FreeBSD.org>  1999-08-26 09:30:50 +0000
commit:    0bedf4fb30066e5e1d4342a1d3914dae7d37cba7 (patch)
tree:      68d8110b41afd0ebbf39167b1a4918eea667a7c5 /contrib/gcc/config/sparc
parent:    d4db5fb866b7ad5216abd5047774a3973b9901a9 (diff)
Virgin import of gcc from EGCS 1.1.2
Diffstat (limited to 'contrib/gcc/config/sparc')
-rw-r--r--  contrib/gcc/config/sparc/aout.h | 26
-rw-r--r--  contrib/gcc/config/sparc/bsd.h | 7
-rw-r--r--  contrib/gcc/config/sparc/elf.h | 42
-rw-r--r--  contrib/gcc/config/sparc/gmon-sol2.c | 429
-rw-r--r--  contrib/gcc/config/sparc/lb1spc.asm | 784
-rw-r--r--  contrib/gcc/config/sparc/lb1spl.asm | 246
-rw-r--r--  contrib/gcc/config/sparc/linux-aout.h | 130
-rw-r--r--  contrib/gcc/config/sparc/linux.h | 259
-rw-r--r--  contrib/gcc/config/sparc/linux64.h | 245
-rw-r--r--  contrib/gcc/config/sparc/lite.h | 38
-rw-r--r--  contrib/gcc/config/sparc/litecoff.h | 113
-rw-r--r--  contrib/gcc/config/sparc/lynx-ng.h | 41
-rw-r--r--  contrib/gcc/config/sparc/lynx.h | 53
-rw-r--r--  contrib/gcc/config/sparc/netbsd.h | 46
-rw-r--r--  contrib/gcc/config/sparc/openbsd.h | 68
-rw-r--r--  contrib/gcc/config/sparc/pbd.h | 184
-rw-r--r--  contrib/gcc/config/sparc/rtems.h | 35
-rw-r--r--  contrib/gcc/config/sparc/sol2-c1.asm | 86
-rw-r--r--  contrib/gcc/config/sparc/sol2-ci.asm | 60
-rw-r--r--  contrib/gcc/config/sparc/sol2-cn.asm | 54
-rw-r--r--  contrib/gcc/config/sparc/sol2-g1.asm | 88
-rw-r--r--  contrib/gcc/config/sparc/sol2-sld.h | 11
-rw-r--r--  contrib/gcc/config/sparc/sol2.h | 232
-rw-r--r--  contrib/gcc/config/sparc/sp64-aout.h | 38
-rw-r--r--  contrib/gcc/config/sparc/sp64-elf.h | 157
-rw-r--r--  contrib/gcc/config/sparc/sparc.c | 6461
-rw-r--r--  contrib/gcc/config/sparc/sparc.h | 3287
-rw-r--r--  contrib/gcc/config/sparc/sparc.md | 6684
-rw-r--r--  contrib/gcc/config/sparc/splet.h | 53
-rw-r--r--  contrib/gcc/config/sparc/sun4gas.h | 27
-rw-r--r--  contrib/gcc/config/sparc/sun4o3.h | 29
-rw-r--r--  contrib/gcc/config/sparc/sunos4.h | 49
-rw-r--r--  contrib/gcc/config/sparc/sysv4.h | 231
-rw-r--r--  contrib/gcc/config/sparc/t-elf | 39
-rw-r--r--  contrib/gcc/config/sparc/t-sol2 | 30
-rw-r--r--  contrib/gcc/config/sparc/t-sp64 | 2
-rw-r--r--  contrib/gcc/config/sparc/t-sparcbare | 26
-rw-r--r--  contrib/gcc/config/sparc/t-sparclite | 24
-rw-r--r--  contrib/gcc/config/sparc/t-splet | 23
-rw-r--r--  contrib/gcc/config/sparc/t-sunos40 | 7
-rw-r--r--  contrib/gcc/config/sparc/t-sunos41 | 16
-rw-r--r--  contrib/gcc/config/sparc/t-vxsparc | 17
-rw-r--r--  contrib/gcc/config/sparc/vxsim.h | 131
-rw-r--r--  contrib/gcc/config/sparc/vxsparc.h | 61
-rw-r--r--  contrib/gcc/config/sparc/x-sysv4 | 2
-rw-r--r--  contrib/gcc/config/sparc/xm-linux.h | 26
-rw-r--r--  contrib/gcc/config/sparc/xm-lynx.h | 39
-rw-r--r--  contrib/gcc/config/sparc/xm-openbsd.h | 23
-rw-r--r--  contrib/gcc/config/sparc/xm-pbd.h | 10
-rw-r--r--  contrib/gcc/config/sparc/xm-sol2.h | 4
-rw-r--r--  contrib/gcc/config/sparc/xm-sp64.h | 25
-rw-r--r--  contrib/gcc/config/sparc/xm-sparc.h | 49
-rw-r--r--  contrib/gcc/config/sparc/xm-sysv4.h | 48
53 files changed, 20895 insertions, 0 deletions
diff --git a/contrib/gcc/config/sparc/aout.h b/contrib/gcc/config/sparc/aout.h
new file mode 100644
index 0000000..478d710
--- /dev/null
+++ b/contrib/gcc/config/sparc/aout.h
@@ -0,0 +1,26 @@
+/* Definitions of target machine for GNU compiler, for SPARC using a.out.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h" /* SPARC definitions */
+#include "aoutos.h" /* A.out definitions */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
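
As a hedged aside (not part of the import): the CPP_PREDEFINES string above makes every compilation behave as if -Dsparc had been passed on the command line, so target-conditional user code can simply test that macro. A minimal illustration:

    /* Hypothetical example of reacting to the -Dsparc predefine. */
    #ifdef sparc
    static const char target_name[] = "sparc-aout";
    #else
    static const char target_name[] = "unknown";
    #endif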
diff --git a/contrib/gcc/config/sparc/bsd.h b/contrib/gcc/config/sparc/bsd.h
new file mode 100644
index 0000000..761abe2
--- /dev/null
+++ b/contrib/gcc/config/sparc/bsd.h
@@ -0,0 +1,7 @@
+#include "sparc/sparc.h"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p}"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s}%{!pg:%{p:gcrt0.o%s}%{!p:crt0.o%s}}"
diff --git a/contrib/gcc/config/sparc/elf.h b/contrib/gcc/config/sparc/elf.h
new file mode 100644
index 0000000..70cb26a
--- /dev/null
+++ b/contrib/gcc/config/sparc/elf.h
@@ -0,0 +1,42 @@
+/* Definitions of target machine for GNU compiler,
+ for SPARC running in an embedded environment using the ELF file format.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sol2.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__elf__ -Acpu(sparc) -Amachine(sparc)"
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0.o%s crti.o%s crtbegin.o%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* Use the default. */
+#undef LINK_SPEC
+
+/* Don't set the target flags, this is done by the linker script */
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* FIXME: until fixed */
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 64
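
With LONG_DOUBLE_TYPE_SIZE forced to 64 above, long double degenerates to the ordinary 64-bit double format on this embedded target. A small hedged check (assuming a compiler built from this configuration) would print the same size for both types:

    /* Hypothetical check: both lines should report 8 bytes on a target
       built with LONG_DOUBLE_TYPE_SIZE 64. */
    #include <stdio.h>
    int main (void)
    {
      printf ("double:      %u\n", (unsigned) sizeof (double));
      printf ("long double: %u\n", (unsigned) sizeof (long double));
      return 0;
    }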
diff --git a/contrib/gcc/config/sparc/gmon-sol2.c b/contrib/gcc/config/sparc/gmon-sol2.c
new file mode 100644
index 0000000..2a5b898
--- /dev/null
+++ b/contrib/gcc/config/sparc/gmon-sol2.c
@@ -0,0 +1,429 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Mangled into a form that works on Sparc Solaris 2 by Mark Eichin
+ * for Cygnus Support, July 1992.
+ */
+
+#ifndef lint
+static char sccsid[] = "@(#)gmon.c 5.3 (Berkeley) 5/22/91";
+#endif /* not lint */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+#if 0
+#include "sparc/gmon.h"
+#else
+struct phdr {
+ char *lpc;
+ char *hpc;
+ int ncnt;
+};
+#define HISTFRACTION 2
+#define HISTCOUNTER unsigned short
+#define HASHFRACTION 1
+#define ARCDENSITY 2
+#define MINARCS 50
+struct tostruct {
+ char *selfpc;
+ long count;
+ unsigned short link;
+};
+struct rawarc {
+ unsigned long raw_frompc;
+ unsigned long raw_selfpc;
+ long raw_count;
+};
+#define ROUNDDOWN(x,y) (((x)/(y))*(y))
+#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
+
+#endif
+
+/* extern mcount() asm ("mcount"); */
+/*extern*/ char *minbrk /* asm ("minbrk") */;
+
+ /*
+ * froms is actually a bunch of unsigned shorts indexing tos
+ */
+static int profiling = 3;
+static unsigned short *froms;
+static struct tostruct *tos = 0;
+static long tolimit = 0;
+static char *s_lowpc = 0;
+static char *s_highpc = 0;
+static unsigned long s_textsize = 0;
+
+static int ssiz;
+static char *sbuf;
+static int s_scale;
+	/* see profil(2) where this is described (incorrectly) */
+#define SCALE_1_TO_1 0x10000L
+
+#define MSG "No space for profiling buffer(s)\n"
+
+static void moncontrol();
+
+void monstartup(lowpc, highpc)
+ char *lowpc;
+ char *highpc;
+{
+ int monsize;
+ char *buffer;
+ register int o;
+
+ /*
+ * round lowpc and highpc to multiples of the density we're using
+ * so the rest of the scaling (here and in gprof) stays in ints.
+ */
+ lowpc = (char *)
+ ROUNDDOWN((unsigned)lowpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_lowpc = lowpc;
+ highpc = (char *)
+ ROUNDUP((unsigned)highpc, HISTFRACTION*sizeof(HISTCOUNTER));
+ s_highpc = highpc;
+ s_textsize = highpc - lowpc;
+ monsize = (s_textsize / HISTFRACTION) + sizeof(struct phdr);
+ buffer = sbrk( monsize );
+ if ( buffer == (char *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ return;
+ }
+ froms = (unsigned short *) sbrk( s_textsize / HASHFRACTION );
+ if ( froms == (unsigned short *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ return;
+ }
+ tolimit = s_textsize * ARCDENSITY / 100;
+ if ( tolimit < MINARCS ) {
+ tolimit = MINARCS;
+ } else if ( tolimit > 65534 ) {
+ tolimit = 65534;
+ }
+ tos = (struct tostruct *) sbrk( tolimit * sizeof( struct tostruct ) );
+ if ( tos == (struct tostruct *) -1 ) {
+ write( 2 , MSG , sizeof(MSG) );
+ froms = 0;
+ tos = 0;
+ return;
+ }
+ minbrk = sbrk(0);
+ tos[0].link = 0;
+ sbuf = buffer;
+ ssiz = monsize;
+ ( (struct phdr *) buffer ) -> lpc = lowpc;
+ ( (struct phdr *) buffer ) -> hpc = highpc;
+ ( (struct phdr *) buffer ) -> ncnt = ssiz;
+ monsize -= sizeof(struct phdr);
+ if ( monsize <= 0 )
+ return;
+ o = highpc - lowpc;
+ if( monsize < o )
+#ifndef hp300
+ s_scale = ( (float) monsize / o ) * SCALE_1_TO_1;
+#else /* avoid floating point */
+ {
+ int quot = o / monsize;
+
+ if (quot >= 0x10000)
+ s_scale = 1;
+ else if (quot >= 0x100)
+ s_scale = 0x10000 / quot;
+ else if (o >= 0x800000)
+ s_scale = 0x1000000 / (o / (monsize >> 8));
+ else
+ s_scale = 0x1000000 / ((o << 8) / monsize);
+ }
+#endif
+ else
+ s_scale = SCALE_1_TO_1;
+ moncontrol(1);
+}
+
+void
+_mcleanup()
+{
+ int fd;
+ int fromindex;
+ int endfrom;
+ char *frompc;
+ int toindex;
+ struct rawarc rawarc;
+ char *profdir;
+ char *proffile;
+ char *progname;
+ char buf[PATH_MAX];
+ extern char **___Argv;
+
+ moncontrol(0);
+
+ if ((profdir = getenv("PROFDIR")) != NULL) {
+ /* If PROFDIR contains a null value, no profiling output is produced */
+ if (*profdir == '\0') {
+ return;
+ }
+
+ progname=strrchr(___Argv[0], '/');
+ if (progname == NULL)
+ progname=___Argv[0];
+ else
+ progname++;
+
+ sprintf(buf, "%s/%ld.%s", profdir, getpid(), progname);
+ proffile = buf;
+ } else {
+ proffile = "gmon.out";
+ }
+
+ fd = creat( proffile, 0666 );
+ if ( fd < 0 ) {
+ perror( proffile );
+ return;
+ }
+# ifdef DEBUG
+ fprintf( stderr , "[mcleanup] sbuf 0x%x ssiz %d\n" , sbuf , ssiz );
+# endif /* DEBUG */
+ write( fd , sbuf , ssiz );
+ endfrom = s_textsize / (HASHFRACTION * sizeof(*froms));
+ for ( fromindex = 0 ; fromindex < endfrom ; fromindex++ ) {
+ if ( froms[fromindex] == 0 ) {
+ continue;
+ }
+ frompc = s_lowpc + (fromindex * HASHFRACTION * sizeof(*froms));
+ for (toindex=froms[fromindex]; toindex!=0; toindex=tos[toindex].link) {
+# ifdef DEBUG
+ fprintf( stderr ,
+ "[mcleanup] frompc 0x%x selfpc 0x%x count %d\n" ,
+ frompc , tos[toindex].selfpc , tos[toindex].count );
+# endif /* DEBUG */
+ rawarc.raw_frompc = (unsigned long) frompc;
+ rawarc.raw_selfpc = (unsigned long) tos[toindex].selfpc;
+ rawarc.raw_count = tos[toindex].count;
+ write( fd , &rawarc , sizeof rawarc );
+ }
+ }
+ close( fd );
+}
+
+/*
+ * The Sparc stack frame is only held together by the frame pointers
+ * in the register windows. According to the SVR4 SPARC ABI
+ * Supplement, Low Level System Information/Operating System
+ * Interface/Software Trap Types, a type 3 trap will flush all of the
+ * register windows to the stack, which will make it possible to walk
+ * the frames and find the return addresses.
+ * However, it seems awfully expensive to incur a trap (system
+ * call) for every function call. It turns out that "call" simply puts
+ * the return address in %o7 expecting the "save" in the procedure to
+ * shift it into %i7; this means that before the "save" occurs, %o7
+ * contains the address of the call to mcount, and %i7 still contains
+ * the caller above that. The asm mcount here simply saves those
+ * registers in argument registers and branches to internal_mcount,
+ * simulating a call with arguments.
+ * Kludges:
+ * 1) the branch to internal_mcount is hard coded; it should be
+ * possible to tell asm to use the assembler-name of a symbol.
+ * 2) in theory, the function calling mcount could have saved %i7
+ * somewhere and reused the register; in practice, I *think* this will
+ * break longjmp (and maybe the debugger) but I'm not certain. (I take
+ * some comfort in the knowledge that it will break the native mcount
+ * as well.)
+ * 3) if builtin_return_address worked, this could be portable.
+ * However, it would really have to be optimized for arguments of 0
+ * and 1 and do something like what we have here in order to avoid the
+ * trap per function call performance hit.
+ * 4) the atexit and monsetup calls prevent this from simply
+ * being a leaf routine that doesn't do a "save" (and would thus have
+ * access to %o7 and %i7 directly) but the call to write() at the end
+ * would have also prevented this.
+ *
+ * -- [eichin:19920702.1107EST]
+ */
+
+/* i7 == last ret, -> frompcindex */
+/* o7 == current ret, -> selfpc */
+/* Solaris 2 libraries use _mcount. */
+asm(".global _mcount; _mcount: mov %i7,%o1; mov %o7,%o0;b,a internal_mcount");
+/* This is for compatibility with old versions of gcc which used mcount. */
+asm(".global mcount; mcount: mov %i7,%o1; mov %o7,%o0;b,a internal_mcount");
+
+static void internal_mcount(selfpc, frompcindex)
+ register char *selfpc;
+ register unsigned short *frompcindex;
+{
+ register struct tostruct *top;
+ register struct tostruct *prevtop;
+ register long toindex;
+ static char already_setup;
+
+ /*
+ * find the return address for mcount,
+ * and the return address for mcount's caller.
+ */
+
+ if(!already_setup) {
+ extern etext();
+ already_setup = 1;
+ monstartup(0, etext);
+#ifdef USE_ONEXIT
+ on_exit(_mcleanup, 0);
+#else
+ atexit(_mcleanup);
+#endif
+ }
+ /*
+ * check that we are profiling
+ * and that we aren't recursively invoked.
+ */
+ if (profiling) {
+ goto out;
+ }
+ profiling++;
+ /*
+ * check that frompcindex is a reasonable pc value.
+ * for example: signal catchers get called from the stack,
+ * not from text space. too bad.
+ */
+ frompcindex = (unsigned short *)((long)frompcindex - (long)s_lowpc);
+ if ((unsigned long)frompcindex > s_textsize) {
+ goto done;
+ }
+ frompcindex =
+ &froms[((long)frompcindex) / (HASHFRACTION * sizeof(*froms))];
+ toindex = *frompcindex;
+ if (toindex == 0) {
+ /*
+ * first time traversing this arc
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ *frompcindex = toindex;
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = 0;
+ goto done;
+ }
+ top = &tos[toindex];
+ if (top->selfpc == selfpc) {
+ /*
+ * arc at front of chain; usual case.
+ */
+ top->count++;
+ goto done;
+ }
+ /*
+ * have to go looking down chain for it.
+ * top points to what we are looking at,
+ * prevtop points to previous top.
+ * we know it is not at the head of the chain.
+ */
+ for (; /* goto done */; ) {
+ if (top->link == 0) {
+ /*
+ * top is end of the chain and none of the chain
+ * had top->selfpc == selfpc.
+ * so we allocate a new tostruct
+ * and link it to the head of the chain.
+ */
+ toindex = ++tos[0].link;
+ if (toindex >= tolimit) {
+ goto overflow;
+ }
+ top = &tos[toindex];
+ top->selfpc = selfpc;
+ top->count = 1;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+ /*
+ * otherwise, check the next arc on the chain.
+ */
+ prevtop = top;
+ top = &tos[top->link];
+ if (top->selfpc == selfpc) {
+ /*
+ * there it is.
+ * increment its count
+ * move it to the head of the chain.
+ */
+ top->count++;
+ toindex = prevtop->link;
+ prevtop->link = top->link;
+ top->link = *frompcindex;
+ *frompcindex = toindex;
+ goto done;
+ }
+
+ }
+done:
+ profiling--;
+ /* and fall through */
+out:
+ return; /* normal return restores saved registers */
+
+overflow:
+ profiling++; /* halt further profiling */
+# define TOLIMIT "mcount: tos overflow\n"
+ write(2, TOLIMIT, sizeof(TOLIMIT));
+ goto out;
+}
+
+/*
+ * Control profiling
+ * profiling is what mcount checks to see if
+ * all the data structures are ready.
+ */
+static void moncontrol(mode)
+ int mode;
+{
+ if (mode) {
+ /* start */
+ profil((unsigned short *)(sbuf + sizeof(struct phdr)),
+ ssiz - sizeof(struct phdr),
+ (int)s_lowpc, s_scale);
+ profiling = 0;
+ } else {
+ /* stop */
+ profil((unsigned short *)0, 0, 0, 0);
+ profiling = 3;
+ }
+}
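
The heart of internal_mcount above is the froms/tos bookkeeping: froms[] maps a hashed caller PC to the head of a linked chain of tostruct records, which is then searched for the current callee. The following is a simplified, hedged C model of that lookup (it omits the overflow check and the move-to-front step the real code performs):

    /* Illustrative model only; mirrors the tostruct shape used above. */
    struct arc { char *selfpc; long count; unsigned short link; };

    static void
    record_arc (struct arc *tos, unsigned short *fromhead, char *selfpc)
    {
      unsigned short i;
      for (i = *fromhead; i != 0; i = tos[i].link)
        if (tos[i].selfpc == selfpc)
          {
            tos[i].count++;            /* existing arc: bump its counter */
            return;
          }
      i = ++tos[0].link;               /* tos[0].link doubles as allocator */
      tos[i].selfpc = selfpc;
      tos[i].count = 1;
      tos[i].link = *fromhead;         /* splice new arc at chain head */
      *fromhead = i;
    }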
diff --git a/contrib/gcc/config/sparc/lb1spc.asm b/contrib/gcc/config/sparc/lb1spc.asm
new file mode 100644
index 0000000..831f33a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lb1spc.asm
@@ -0,0 +1,784 @@
+/* This is an assembly language implementation of libgcc1.c for the sparc
+ processor.
+
+ These routines are derived from the Sparc Architecture Manual, version 8,
+ slightly edited to match the desired calling convention, and also to
+ optimize them for our purposes. */
+
+#ifdef L_mulsi3
+.text
+ .align 4
+ .global .umul
+ .proc 4
+.umul:
+ or %o0, %o1, %o4 ! logical or of multiplier and multiplicand
+ mov %o0, %y ! multiplier to Y register
+ andncc %o4, 0xfff, %o5 ! mask out lower 12 bits
+ be mul_shortway ! can do it the short way
+ andcc %g0, %g0, %o4 ! zero the partial product and clear NV cc
+ !
+ ! long multiply
+ !
+ mulscc %o4, %o1, %o4 ! first iteration of 33
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 32nd iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ ! the upper 32 bits of product are wrong, but we do not care
+ retl
+ rd %y, %o0
+ !
+ ! short multiply
+ !
+mul_shortway:
+ mulscc %o4, %o1, %o4 ! first iteration of 13
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4
+ mulscc %o4, %o1, %o4 ! 12th iteration
+ mulscc %o4, %g0, %o4 ! last iteration only shifts
+ rd %y, %o5
+ sll %o4, 12, %o4 ! left shift partial product by 12 bits
+ srl %o5, 20, %o5 ! right shift partial product by 20 bits
+ retl
+ or %o5, %o4, %o0 ! merge for true product
+#endif
+
+#ifdef L_divsi3
+/*
+ * Division and remainder, from Appendix E of the Sparc Version 8
+ * Architecture Manual, with fixes from Gordon Irlam.
+ */
+
+/*
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * m4 parameters:
+ * .div name of function to generate
+ * div div=div => %o0 / %o1; div=rem => %o0 % %o1
+ * true true=true => signed; true=false => unsigned
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
+ .global .udiv
+ .align 4
+ .proc 4
+ .text
+.udiv:
+ b ready_to_divide
+ mov 0, %g3 ! result is always positive
+
+ .global .div
+ .align 4
+ .proc 4
+ .text
+.div:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge ready_to_divide ! no, go do the divide
+ xor %o1, %o0, %g3 ! compute sign in any case
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge ready_to_divide ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+
+
+ready_to_divide:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 ! ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+ ! Compute ITER in an unorthodox manner: know we need to shift V into
+ ! the top decade: so do not even bother to compare to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+ ! We get here if the %o1 overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-Sparc manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ sub %o2, 1, %o2
+
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o2, %o2
+1:
+ retl
+ mov %o2, %o0
+#endif
+
+#ifdef L_modsi3
+/* This implementation was taken from glibc:
+ *
+ * Input: dividend and divisor in %o0 and %o1 respectively.
+ *
+ * Algorithm parameters:
+ * N how many bits per iteration we try to get (4)
+ * WORDSIZE total number of bits (32)
+ *
+ * Derived constants:
+ * TOPBITS number of bits in the top decade of a number
+ *
+ * Important variables:
+ * Q the partial quotient under development (initially 0)
+ * R the remainder so far, initially the dividend
+ * ITER number of main division loop iterations required;
+ * equal to ceil(log2(quotient) / N). Note that this
+ * is the log base (2^N) of the quotient.
+ * V the current comparand, initially divisor*2^(ITER*N-1)
+ *
+ * Cost:
+ * Current estimate for non-large dividend is
+ * ceil(log2(quotient) / N) * (10 + 7N/2) + C
+ * A large dividend is one greater than 2^(31-TOPBITS) and takes a
+ * different path, as the upper bits of the quotient must be developed
+ * one bit at a time.
+ */
+.text
+ .align 4
+ .global .urem
+ .proc 4
+.urem:
+ b divide
+ mov 0, %g3 ! result always positive
+
+ .align 4
+ .global .rem
+ .proc 4
+.rem:
+ ! compute sign of result; if neither is negative, no problem
+ orcc %o1, %o0, %g0 ! either negative?
+ bge 2f ! no, go do the divide
+ mov %o0, %g3 ! sign of remainder matches %o0
+ tst %o1
+ bge 1f
+ tst %o0
+ ! %o1 is definitely negative; %o0 might also be negative
+ bge 2f ! if %o0 not negative...
+ sub %g0, %o1, %o1 ! in any case, make %o1 nonneg
+1: ! %o0 is negative, %o1 is nonnegative
+ sub %g0, %o0, %o0 ! make %o0 nonnegative
+2:
+
+ ! Ready to divide. Compute size of quotient; scale comparand.
+divide:
+ orcc %o1, %g0, %o5
+ bne 1f
+ mov %o0, %o3
+
+ ! Divide by zero trap. If it returns, return 0 (about as
+ ! wrong as possible, but that is what SunOS does...).
+ ta 0x2 !ST_DIV0
+ retl
+ clr %o0
+
+1:
+ cmp %o3, %o5 ! if %o1 exceeds %o0, done
+ blu got_result ! (and algorithm fails otherwise)
+ clr %o2
+ sethi %hi(1 << (32 - 4 - 1)), %g1
+ cmp %o3, %g1
+ blu not_really_big
+ clr %o4
+
+ ! Here the dividend is >= 2**(31-N) or so. We must be careful here,
+ ! as our usual N-at-a-shot divide step will cause overflow and havoc.
+ ! The number of bits in the result here is N*ITER+SC, where SC <= N.
+ ! Compute ITER in an unorthodox manner: know we need to shift V into
+ ! the top decade: so do not even bother to compare to R.
+ 1:
+ cmp %o5, %g1
+ bgeu 3f
+ mov 1, %g2
+ sll %o5, 4, %o5
+ b 1b
+ add %o4, 1, %o4
+
+ ! Now compute %g2.
+ 2: addcc %o5, %o5, %o5
+ bcc not_too_big
+ add %g2, 1, %g2
+
+ ! We get here if the %o1 overflowed while shifting.
+ ! This means that %o3 has the high-order bit set.
+ ! Restore %o5 and subtract from %o3.
+ sll %g1, 4, %g1 ! high order bit
+ srl %o5, 1, %o5 ! rest of %o5
+ add %o5, %g1, %o5
+ b do_single_div
+ sub %g2, 1, %g2
+
+ not_too_big:
+ 3: cmp %o5, %o3
+ blu 2b
+ nop
+ be do_single_div
+ nop
+ /* NB: these are commented out in the V8-Sparc manual as well */
+ /* (I do not understand this) */
+ ! %o5 > %o3: went too far: back up 1 step
+ ! srl %o5, 1, %o5
+ ! dec %g2
+ ! do single-bit divide steps
+ !
+ ! We have to be careful here. We know that %o3 >= %o5, so we can do the
+ ! first divide step without thinking. BUT, the others are conditional,
+ ! and are only done if %o3 >= 0. Because both %o3 and %o5 may have the high-
+ ! order bit set in the first step, just falling into the regular
+ ! division loop will mess up the first time around.
+ ! So we unroll slightly...
+ do_single_div:
+ subcc %g2, 1, %g2
+ bl end_regular_divide
+ nop
+ sub %o3, %o5, %o3
+ mov 1, %o2
+ b end_single_divloop
+ nop
+ single_divloop:
+ sll %o2, 1, %o2
+ bl 1f
+ srl %o5, 1, %o5
+ ! %o3 >= 0
+ sub %o3, %o5, %o3
+ b 2f
+ add %o2, 1, %o2
+ 1: ! %o3 < 0
+ add %o3, %o5, %o3
+ sub %o2, 1, %o2
+ 2:
+ end_single_divloop:
+ subcc %g2, 1, %g2
+ bge single_divloop
+ tst %o3
+ b,a end_regular_divide
+
+not_really_big:
+1:
+ sll %o5, 4, %o5
+ cmp %o5, %o3
+ bleu 1b
+ addcc %o4, 1, %o4
+ be got_result
+ sub %o4, 1, %o4
+
+ tst %o3 ! set up for initial iteration
+divloop:
+ sll %o2, 4, %o2
+ ! depth 1, accumulated bits 0
+ bl L1.16
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 2, accumulated bits 1
+ bl L2.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 3
+ bl L3.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 7
+ bl L4.23
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2+1), %o2
+L4.23:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (7*2-1), %o2
+
+L3.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 5
+ bl L4.21
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2+1), %o2
+
+L4.21:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (5*2-1), %o2
+
+L2.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits 1
+ bl L3.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 3
+ bl L4.19
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2+1), %o2
+
+L4.19:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (3*2-1), %o2
+
+L3.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits 1
+ bl L4.17
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2+1), %o2
+
+L4.17:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (1*2-1), %o2
+
+L1.16:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 2, accumulated bits -1
+ bl L2.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -1
+ bl L3.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -1
+ bl L4.15
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2+1), %o2
+
+L4.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-1*2-1), %o2
+
+L3.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -3
+ bl L4.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2+1), %o2
+
+L4.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-3*2-1), %o2
+
+L2.15:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 3, accumulated bits -3
+ bl L3.13
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -5
+ bl L4.11
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2+1), %o2
+
+L4.11:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-5*2-1), %o2
+
+L3.13:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ ! depth 4, accumulated bits -7
+ bl L4.9
+ srl %o5,1,%o5
+ ! remainder is positive
+ subcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2+1), %o2
+
+L4.9:
+ ! remainder is negative
+ addcc %o3,%o5,%o3
+ b 9f
+ add %o2, (-7*2-1), %o2
+
+ 9:
+end_regular_divide:
+ subcc %o4, 1, %o4
+ bge divloop
+ tst %o3
+ bl,a got_result
+ ! non-restoring fixup here (one instruction only!)
+ add %o3, %o1, %o3
+
+got_result:
+ ! check to see if answer should be < 0
+ tst %g3
+ bl,a 1f
+ sub %g0, %o3, %o3
+1:
+ retl
+ mov %o3, %o0
+
+#endif
+
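For readers cross-checking the unrolled four-bits-per-iteration division above: a hedged C model that produces the same quotient and remainder for a nonzero divisor is the classic one-bit restoring divide (the assembly develops four quotient bits per loop and applies a non-restoring fixup, but the final values agree):

    /* Illustrative model of .udiv/.urem semantics; not the algorithm
       actually used by the assembly above. */
    static void
    udivmod32 (unsigned long n, unsigned long d,
               unsigned long *q, unsigned long *r)
    {
      unsigned long quot = 0, rem = 0;
      int i;
      for (i = 31; i >= 0; i--)
        {
          rem = (rem << 1) | ((n >> i) & 1);  /* bring down next bit */
          if (rem >= d)
            {
              rem -= d;                       /* quotient bit is 1 */
              quot |= 1UL << i;
            }
        }
      *q = quot;
      *r = rem;
    }
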
diff --git a/contrib/gcc/config/sparc/lb1spl.asm b/contrib/gcc/config/sparc/lb1spl.asm
new file mode 100644
index 0000000..4c8bc30
--- /dev/null
+++ b/contrib/gcc/config/sparc/lb1spl.asm
@@ -0,0 +1,246 @@
+/* This is an assembly language implementation of libgcc1.c for the sparclite
+ processor.
+
+ These routines are all from the Sparclite User's Guide, slightly edited
+ to match the desired calling convention, and also to optimize them. */
+
+#ifdef L_udivsi3
+.text
+ .align 4
+ .global .udiv
+ .proc 04
+.udiv:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ retl
+ divscc %g1,%o1,%o0
+#endif
+
+#ifdef L_umodsi3
+.text
+ .align 4
+ .global .urem
+ .proc 04
+.urem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ tst %g0
+ divscc %o0,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ divscc %g1,%o1,%g1
+ bl 1f
+ rd %y,%o0
+ retl
+ nop
+1: retl
+ add %o0,%o1,%o0
+#endif
+
+#ifdef L_divsi3
+.text
+ .align 4
+ .global .div
+ .proc 04
+! ??? This routine could be made faster if it was optimized, and if it was
+! rewritten to only calculate the quotient.
+.div:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4
+1: tst %o0
+ bl,a 2f
+ mov -1,%y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f
+ mov %y,%o3
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f
+ sub %g0,%g1,%g1
+7: retl
+ mov %g1,%o0 ! Quotient is in %g1.
+#endif
+
+#ifdef L_modsi3
+.text
+ .align 4
+ .global .rem
+ .proc 04
+! ??? This routine could be made faster if it was optimized, and if it was
+! rewritten to only calculate the remainder.
+.rem:
+ wr %g0,%g0,%y ! Not a delayed write for sparclite
+ mov %o1,%o4
+ tst %o1
+ bl,a 1f
+ sub %g0,%o4,%o4
+1: tst %o0
+ bl,a 2f
+ mov -1,%y
+2: divscc %o0,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ divscc %g1,%o4,%g1
+ be 6f
+ mov %y,%o3
+ bg 4f
+ addcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bl 5f
+ tst %g1
+ ba 5f
+ add %o3,%o4,%o3
+4: subcc %o3,%o4,%g0
+ be,a 6f
+ mov %g0,%o3
+ tst %o0
+ bge 5f
+ tst %g1
+ sub %o3,%o4,%o3
+5: bl,a 6f
+ add %g1,1,%g1
+6: tst %o1
+ bl,a 7f
+ sub %g0,%g1,%g1
+7: retl
+ mov %o3,%o0 ! Remainder is in %o3.
+#endif
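
Both lb1spc.asm and this file implement truncating signed division: the quotient is negated when the operand signs differ, while the remainder keeps the sign of the dividend. C99's / and % operators follow the same convention, so a hedged sanity check of the sign rules is:

    /* Hypothetical check of the sign conventions (C99 semantics match). */
    #include <stdio.h>
    int main (void)
    {
      printf ("%d %d\n",  7 / -2,  7 % -2);   /* -3  1 */
      printf ("%d %d\n", -7 /  2, -7 %  2);   /* -3 -1 */
      return 0;
    }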
diff --git a/contrib/gcc/config/sparc/linux-aout.h b/contrib/gcc/config/sparc/linux-aout.h
new file mode 100644
index 0000000..76d7653
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux-aout.h
@@ -0,0 +1,130 @@
+/* Definitions for SPARC running Linux-based GNU systems with a.out.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <aoutos.h>
+#include <sparc/sparc.h>
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
+ For now, we play safe. It may change later. */
+
+#if 0
+#undef MULTIBYTE_CHARS
+#define MULTIBYTE_CHARS 1
+#endif
+
+/* We need that too. */
+#define HANDLE_SYSV_PRAGMA
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{pg:gcrt0.o%s} %{!pg:%{p:gcrt0.o%s} %{!p:crt0.o%s}} %{static:-static}"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc GNU/Linux with a.out)");
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler,
+ and we want to retain compatibility with older gcc versions. */
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#undef LIB_SPEC
+
+#if 1
+/* We no longer link with libc_p.a or libg.a by default. If you
+ want to profile or debug the GNU/Linux C library, please add
+ -lc_p or -ggdb to LDFLAGS at the link time, respectively. */
+#define LIB_SPEC \
+"%{mieee-fp:-lieee} %{p:-lgmon} %{pg:-lgmon} %{!ggdb:-lc} %{ggdb:-lg}"
+#else
+#define LIB_SPEC \
+"%{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg -static}}}"
+#endif
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m sparclinux"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s %{fpic:-K PIC} %{fPIC:-K PIC}"
+
+#if 0
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. GNU/Linux does not support
+ long double yet. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
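
The ta 0x20 / ta 0x21 instructions in the state-save macros above are SPARC software traps that, on this kernel, read and write the integer condition codes through %g1 (in SunOS/SVR4 trap numbering these correspond to the getcc/setcc services). A hedged standalone sketch of the same pattern:

    /* Hypothetical standalone use of the pattern above; only meaningful
       on a SPARC kernel that implements the 0x20/0x21 traps. */
    void
    around_cc_clobbering_code (void)
    {
      unsigned long int ms_flags, ms_saveret;
      asm volatile ("ta 0x20\n\t"          /* getcc: codes -> %g1 */
                    "mov %%g1, %0\n\t"
                    "mov %%g2, %1\n\t"
                    : "=r" (ms_flags), "=r" (ms_saveret));
      /* ... code that may clobber the condition codes ... */
      asm volatile ("mov %0, %%g1\n\t"
                    "mov %1, %%g2\n\t"
                    "ta 0x21\n\t"          /* setcc: %g1 -> codes */
                    : /* no outputs */
                    : "r" (ms_flags), "r" (ms_saveret));
    }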
diff --git a/contrib/gcc/config/sparc/linux.h b/contrib/gcc/config/sparc/linux.h
new file mode 100644
index 0000000..7bbbfa4
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux.h
@@ -0,0 +1,259 @@
+/* Definitions for SPARC running Linux-based GNU systems with ELF.
+ Copyright (C) 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define LINUX_DEFAULT_ELF
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+/* GNU/Linux uses ctype from glibc.a. I am not sure how complete it is.
+ For now, we play safe. It may change later. */
+
+#if 0
+#undef MULTIBYTE_CHARS
+#define MULTIBYTE_CHARS 1
+#endif
+
+#ifndef USE_GNULIBC_1
+#undef DEFAULT_VTABLE_THUNKS
+#define DEFAULT_VTABLE_THUNKS 1
+#endif
+
+/* Use stabs instead of DWARF debug format. */
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include <sparc/sysv4.h>
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux.  Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+/* This is for -profile to use -lc_p instead of -lc. */
+#undef CC1_SPEC
+#define CC1_SPEC "%{profile:-p} \
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc GNU/Linux with ELF)");
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -Dsparc -D__sparc__ -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#ifdef USE_GNULIBC_1
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE}"
+#else
+#define CPP_SUBTARGET_SPEC \
+"%{fPIC:-D__PIC__ -D__pic__} %{fpic:-D__PIC__ -D__pic__} %{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}"
+#endif
+
+#undef LIB_SPEC
+/* We no longer link with libc_p.a or libg.a by default. If you
+ want to profile or debug the GNU/Linux C library, please add
+ -lc_p or -ggdb to LDFLAGS at the link time, respectively. */
+#if 1
+#ifdef USE_GNULIBC_1
+#define LIB_SPEC \
+ "%{!shared: %{p:-lgmon} %{pg:-lgmon} %{profile:-lgmon -lc_p} \
+ %{!profile:%{!ggdb:-lc} %{ggdb:-lg}}}"
+#else
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+#endif
+#else
+#define LIB_SPEC \
+ "%{!shared: \
+ %{mieee-fp:-lieee} %{p:-lgmon -lc_p} %{pg:-lgmon -lc_p} \
+ %{!p:%{!pg:%{!g*:-lc} %{g*:-lg}}}}"
+#endif
+
+/* Provide a LINK_SPEC appropriate for GNU/Linux.  Here we provide support
+   for the special GCC options -static and -shared, which allow us to
+   link things in one of these three modes by applying the appropriate
+   combinations of options at link-time.  We would like to support as
+   many of the other GNU linker options here as possible, but I don't
+   have the time to search for those flags.  I am not sure how to add
+   support for -soname shared_object_name.  H.J.
+
+   I took out %{v:%{!V:-V}}; it is too much :-(.  Use -Wl,-V instead.
+
+   When the -shared link option is used, a final link is not done. */
+
+/* If ELF is the default format, we should not use /lib/elf. */
+
+#undef LINK_SPEC
+#ifdef USE_GNULIBC_1
+#ifndef LINUX_DEFAULT_ELF
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/elf/ld-linux.so.1} \
+ %{!rpath:-rpath /lib/elf/}} %{static:-static}}}"
+#else
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.1}} \
+ %{static:-static}}}"
+#endif
+#else
+#define LINK_SPEC "-m elf32_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux.so.2}} \
+ %{static:-static}}}"
+#endif
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s %{fpic:-K PIC} %{fPIC:-K PIC}"
+
+/* Same as sparc.h */
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. XXX */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
+
+
+#if 0
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. GNU/Linux does not support
+ long double yet. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
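
A hedged illustration of how the three internal-label macros above cooperate: ASM_GENERATE_INTERNAL_LABEL builds the name GCC stores internally (the leading '*' marks the string as verbatim, so assemble_name adds no user-label prefix), while ASM_OUTPUT_INTERNAL_LABEL emits the matching definition.

    /* Illustrative only: mimics the sprintf/fprintf bodies above for
       label class "C", number 0. */
    #include <stdio.h>
    int main (void)
    {
      char label[32];
      sprintf (label, "*.L%s%d", "C", 0);    /* stored name: "*.LC0" */
      printf (".L%s%d:\n", "C", 0);          /* emitted:     ".LC0:" */
      printf ("%s\n", label + 1);            /* reference:   ".LC0"  */
      return 0;
    }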
diff --git a/contrib/gcc/config/sparc/linux64.h b/contrib/gcc/config/sparc/linux64.h
new file mode 100644
index 0000000..77bc668
--- /dev/null
+++ b/contrib/gcc/config/sparc/linux64.h
@@ -0,0 +1,245 @@
+/* Definitions for 64-bit SPARC running Linux-based GNU systems with ELF.
+ Copyright 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by David S. Miller (davem@caip.rutgers.edu)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? bi-architecture support will require changes to the linker
+ related specs, among perhaps other things (multilibs). */
+/* #define SPARC_BI_ARCH */
+
+#define LINUX_DEFAULT_ELF
+
+/* Don't assume anything about the header files. */
+#define NO_IMPLICIT_EXTERN_C
+
+#undef HAVE_ATEXIT
+#define HAVE_ATEXIT
+
+#include <sparc/sysv4.h>
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+/* Output at beginning of assembler file. */
+/* The .file command should always begin the output. */
+#undef ASM_FILE_START
+#define ASM_FILE_START(FILE) \
+ do { \
+ output_file_directive (FILE, main_input_filename); \
+ fprintf (FILE, "\t.version\t\"01.01\"\n"); \
+ } while (0)
+
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-Av9a"
+
+/* Provide a STARTFILE_SPEC appropriate for GNU/Linux. Here we add
+ the GNU/Linux magical crtbegin.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main'. */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared: \
+ %{pg:gcrt1.o%s} %{!pg:%{p:gcrt1.o%s} %{!p:crt1.o%s}}}\
+ crti.o%s %{!shared:crtbegin.o%s} %{shared:crtbeginS.o%s}"
+
+/* Provide an ENDFILE_SPEC appropriate for GNU/Linux.  Here we tack on
+ the GNU/Linux magical crtend.o file (see crtstuff.c) which
+ provides part of the support for getting C++ file-scope static
+ object constructed before entering `main', followed by a normal
+ GNU/Linux "finalizer" file, `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC \
+ "%{!shared:crtend.o%s} %{shared:crtendS.o%s} crtn.o%s"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64 GNU/Linux with ELF)");
+
+/* A 64 bit v9 compiler with stack-bias,
+ in a Medium/Anywhere code model environment. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT /* + MASK_HARD_QUAD */ \
+ + MASK_STACK_BIAS + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU)
+
+/* The default code model. */
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDANY
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-D__ELF__ -Dunix -D_LONGLONG -Dsparc -D__sparc__ -Dlinux -Asystem(unix) -Asystem(posix)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{fPIC:-D__PIC__ -D__pic__} \
+%{fpic:-D__PIC__ -D__pic__} \
+%{posix:-D_POSIX_SOURCE} \
+%{pthread:-D_REENTRANT} \
+"
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{shared: -lc} \
+ %{!shared: %{mieee-fp:-lieee} %{pthread:-lpthread} \
+ %{profile:-lc_p} %{!profile: -lc}}"
+
+/* Provide a LINK_SPEC appropriate for GNU/Linux.  Here we provide support
+   for the special GCC options -static and -shared, which allow us to
+   link things in one of these three modes by applying the appropriate
+   combinations of options at link-time.  We would like to support as
+   many of the other GNU linker options as possible here, but I don't
+   have the time to search for those flags.  I am not sure how to add
+   support for -soname shared_object_name.  H.J.
+
+   I took out %{v:%{!V:-V}}.  It is too much :-(.  They can use
+   -Wl,-V.
+
+   When the -shared link option is used, a final link is not being
+   done.  */
+
+/* If ELF is the default format, we should not use /lib/elf. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-m elf64_sparc -Y P,/usr/lib %{shared:-shared} \
+ %{!shared: \
+ %{!ibcs: \
+ %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker /lib/ld-linux64.so.2}} \
+ %{static:-static}}} \
+%{mlittle-endian:-EL} \
+"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{V} \
+%{v:%{!V:-V}} \
+%{!Qn:-Qy} \
+%{n} \
+%{T} \
+%{Ym,*} \
+%{Wa,*:%*} \
+-s %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{mlittle-endian:-EL} \
+%(asm_cpu) %(asm_arch) \
+"
+
+/* Same as sparc.h */
+#undef DBX_REGISTER_NUMBER
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* System V Release 4 uses DWARF debugging info.  But DWARF1 doesn't do
+   64-bit anything, so we use DWARF2.  */
+
+#undef DWARF2_DEBUGGING_INFO
+#undef DWARF_DEBUGGING_INFO
+#undef DBX_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
+
+/* Stabs doesn't use this, and it confuses a simulator. */
+/* ??? Need to see what DWARF needs, if anything. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE)
+
+/* Define the names of various pseudo-ops used by the Sparc/svr4 assembler.
+   ??? If ints are 64 bits then UNALIGNED_INT_ASM_OP (defined elsewhere) is
+   misnamed.  These should all refer to explicit sizes (half/word/xword?)
+   rather than to short/int/long/etc.  */
+
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+
+/* DWARF bits. */
+
+/* Follow Irix 6 and not the Dwarf2 draft in using 64-bit offsets.
+   Obviously the Dwarf2 folks haven't tried to actually build systems
+   with their spec.  On a 64-bit system, only 64-bit relocs become
+   RELATIVE relocations.  */
+
+/* #define DWARF_OFFSET_SIZE PTR_SIZE */
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
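+
+/* Illustration (hypothetical use site, not part of this header): with the
+   definitions above, MACHINE_STATE_SAVE(1) captures the state handed back
+   in %g1/%g2 by the "ta 0x20" get-condition-codes trap, and
+   MACHINE_STATE_RESTORE(1) feeds it to the "ta 0x21" set trap:
+
+     void
+     example (void)
+     {
+       unsigned long int ms_flags, ms_saveret;
+       asm volatile ("ta 0x20\n\t"
+                     "mov %%g1, %0\n\t"
+                     "mov %%g2, %1\n\t"
+                     : "=r" (ms_flags), "=r" (ms_saveret));
+       ...code that may clobber the condition codes...
+       asm volatile ("mov %0, %%g1\n\t"
+                     "mov %1, %%g2\n\t"
+                     "ta 0x21\n\t"
+                     : : "r" (ms_flags), "r" (ms_saveret));
+     }
+*/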
diff --git a/contrib/gcc/config/sparc/lite.h b/contrib/gcc/config/sparc/lite.h
new file mode 100644
index 0000000..55c232a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lite.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler, for SPARClite w/o FPU.
+ Copyright (C) 1993, 1996 Free Software Foundation, Inc.
+ Contributed by Jim Wilson (wilson@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Dsparclite -Acpu(sparc) -Amachine(sparc)"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparclite)");
+
+/* Enable app-regs and epilogue options. Do not enable the fpu. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+
+/* US Software GOFAST library support. */
+#include "gofast.h"
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS INIT_GOFAST_OPTABS
diff --git a/contrib/gcc/config/sparc/litecoff.h b/contrib/gcc/config/sparc/litecoff.h
new file mode 100644
index 0000000..bd89e1b
--- /dev/null
+++ b/contrib/gcc/config/sparc/litecoff.h
@@ -0,0 +1,113 @@
+/* Definitions of target machine for GNU compiler, for SPARClite w/o FPU, COFF.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Written by Ken Raeburn (raeburn@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/lite.h"
+
+#undef ASM_OUTPUT_IDENT
+
+#undef SELECT_SECTION
+#undef SELECT_RTX_SECTION
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+
+#include "svr3.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Dsparclite -Acpu(sparc) -Amachine(sparc)"
+
+/* Default to stabs in COFF. */
+
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+#include "dbxcoff.h"
+
+/* Support the ctors and dtors sections for g++. */
+
+#undef INIT_SECTION_ASM_OP
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t.ctors,\"x\""
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t.dtors,\"x\""
+
+/* A list of other sections which the compiler might be "in" at any
+ given time. */
+
+#undef EXTRA_SECTIONS
+#define EXTRA_SECTIONS in_const, in_ctors, in_dtors
+
+/* A list of extra section function definitions. */
+
+#undef EXTRA_SECTION_FUNCTIONS
+#define EXTRA_SECTION_FUNCTIONS \
+ CONST_SECTION_FUNCTION \
+ CTORS_SECTION_FUNCTION \
+ DTORS_SECTION_FUNCTION
+
+#define CTORS_SECTION_FUNCTION \
+void \
+ctors_section () \
+{ \
+ if (in_section != in_ctors) \
+ { \
+ fprintf (asm_out_file, "%s\n", CTORS_SECTION_ASM_OP); \
+ in_section = in_ctors; \
+ } \
+}
+
+#define DTORS_SECTION_FUNCTION \
+void \
+dtors_section () \
+{ \
+ if (in_section != in_dtors) \
+ { \
+ fprintf (asm_out_file, "%s\n", DTORS_SECTION_ASM_OP); \
+ in_section = in_dtors; \
+ } \
+}
+
+#define INT_ASM_OP ".long"
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global constructors. */
+#undef ASM_OUTPUT_CONSTRUCTOR
+#define ASM_OUTPUT_CONSTRUCTOR(FILE,NAME) \
+ do { \
+ ctors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
+
+/* A C statement (sans semicolon) to output an element in the table of
+ global destructors. */
+#undef ASM_OUTPUT_DESTRUCTOR
+#define ASM_OUTPUT_DESTRUCTOR(FILE,NAME) \
+ do { \
+ dtors_section (); \
+ fprintf (FILE, "\t%s\t ", INT_ASM_OP); \
+ assemble_name (FILE, NAME); \
+ fprintf (FILE, "\n"); \
+ } while (0)
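+
+/* Illustration (hypothetical constructor name): for an assembler-level
+   constructor symbol ctor_fn, ASM_OUTPUT_CONSTRUCTOR above switches to
+   the ctors section and emits roughly
+
+       .section  .ctors,"x"
+       .long     ctor_fn
+
+   so one address per constructor is collected in .ctors for the startup
+   code to walk; ASM_OUTPUT_DESTRUCTOR does the same for .dtors.  */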
+
+#undef DO_GLOBAL_CTORS_BODY
+#undef DO_GLOBAL_DTORS_BODY
diff --git a/contrib/gcc/config/sparc/lynx-ng.h b/contrib/gcc/config/sparc/lynx-ng.h
new file mode 100644
index 0000000..9e9f82c
--- /dev/null
+++ b/contrib/gcc/config/sparc/lynx-ng.h
@@ -0,0 +1,41 @@
+/* Definitions for SPARC running LynxOS, using Lynx's old as and ld.
+ Copyright (C) 1993, 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+#include <lynx-ng.h>
+
+/* ??? Must redefine to get sparclite and v8 defines. Can this be done
+ differently? */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{mthreads:-D_MULTITHREADED} \
+ %{mposix:-D_POSIX_SOURCE} \
+ %{msystem-v:-I/usr/include_v} \
+ %(cpp_cpu)"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -DLynx -DIBITS32 -Asystem(unix) -Asystem(lynx) -Acpu(sparc) -Amachine(sparc)"
+
+/* Provide required defaults for linker switches. */
+
+#undef LINK_SPEC
+#define LINK_SPEC "-e __main -T 0 %{msystem-v:-V} %{mcoff:-k}"
diff --git a/contrib/gcc/config/sparc/lynx.h b/contrib/gcc/config/sparc/lynx.h
new file mode 100644
index 0000000..99b319a
--- /dev/null
+++ b/contrib/gcc/config/sparc/lynx.h
@@ -0,0 +1,53 @@
+/* Definitions for SPARC running LynxOS.
+ Copyright (C) 1993, 1995, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+
+#undef ASM_OUTPUT_IDENT
+#undef SELECT_SECTION
+#undef SELECT_RTX_SECTION
+
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+
+#include <lynx.h>
+
+/* ??? Must redefine to get sparclite and v8 defines. Can this be done
+ differently? */
+
+#undef CPP_SPEC
+#define CPP_SPEC "%{mthreads:-D_MULTITHREADED} \
+ %{mposix:-D_POSIX_SOURCE} \
+ %{msystem-v:-I/usr/include_v} \
+ %(cpp_cpu)"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -DSPARC -DLynx -DLYNX -DIBITS32 -Asystem(unix) -Asystem(lynx) -Acpu(sparc) -Amachine(sparc)"
+
+#undef LINK_SPEC
+
+/* The Sparc version of libc.a has references to libm.a (printf calls pow,
+   for instance), so we must always link both.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{mthreads:-L/lib/thread/} \
+ %{msystem-v:-lc_v -lm_v -lc_v} \
+ %{!msystem-v:%{mposix:-lc_p} -lc -lm -lc}"
diff --git a/contrib/gcc/config/sparc/netbsd.h b/contrib/gcc/config/sparc/netbsd.h
new file mode 100644
index 0000000..a512f41
--- /dev/null
+++ b/contrib/gcc/config/sparc/netbsd.h
@@ -0,0 +1,46 @@
+#include <sparc/sparc.h>
+
+/* Get generic NetBSD definitions. */
+
+#include <netbsd.h>
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dunix -Dsparc -D__NetBSD__ -Asystem(unix) -Asystem(NetBSD) -Acpu(sparc) -Amachine(sparc)"
+
+/* Make gcc agree with <machine/ansi.h> */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_UNSIGNED
+#define WCHAR_UNSIGNED 0
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* This is BSD, so it wants DBX format. */
+
+#define DBX_DEBUGGING_INFO
+
+/* This is the char to use for continuation (in case we need to turn
+ continuation back on). */
+
+#define DBX_CONTIN_CHAR '?'
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Until they use ELF or something that handles dwarf2 unwinds
+ and initialization stuff better. */
+#define DWARF2_UNWIND_INFO 0
+
diff --git a/contrib/gcc/config/sparc/openbsd.h b/contrib/gcc/config/sparc/openbsd.h
new file mode 100644
index 0000000..19ece97
--- /dev/null
+++ b/contrib/gcc/config/sparc/openbsd.h
@@ -0,0 +1,68 @@
+/* Configuration file for sparc OpenBSD target.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/sparc.h>
+
+/* Get generic OpenBSD definitions. */
+#define OBSD_OLD_GAS
+#include <openbsd.h>
+
+/* Run-time target specifications. */
+#define CPP_PREDEFINES "-D__unix__ -D__sparc__ -D__OpenBSD__ -Asystem(unix) -Asystem(OpenBSD) -Acpu(sparc) -Amachine(sparc)"
+
+/* Layout of source language data types */
+
+/* This must agree with <machine/ansi.h> */
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 32
+
+/* Specific options for DBX Output. */
+
+/* This is BSD, so it wants DBX format. */
+#define DBX_DEBUGGING_INFO
+
+/* This is the char to use for continuation */
+#define DBX_CONTIN_CHAR '?'
+
+/* Stack & calling: aggregate returns. */
+
+/* Don't default to pcc-struct-return, because gcc is the only compiler, and
+ we want to retain compatibility with older gcc versions. */
+#undef DEFAULT_PCC_STRUCT_RETURN
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+/* Assembler format: exception region output. */
+
+/* All configurations that don't use elf must be explicit about not using
+ dwarf unwind information. egcs doesn't try too hard to check internal
+ configuration files... */
+#define DWARF2_UNWIND_INFO 0
+
+/* The default sparc.h already defines ASM_OUTPUT_MI_THUNK.  */
+
diff --git a/contrib/gcc/config/sparc/pbd.h b/contrib/gcc/config/sparc/pbd.h
new file mode 100644
index 0000000..459bffd
--- /dev/null
+++ b/contrib/gcc/config/sparc/pbd.h
@@ -0,0 +1,184 @@
+/* Definitions of target machine for GNU compiler, Citicorp/TTI Unicom PBD
+ version (using GAS and COFF (encapsulated is unacceptable) )
+ Copyright (C) 1990, 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+/* Names to predefine in the preprocessor for this target machine. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -DUnicomPBD -Dunix -D__GCC_NEW_VARARGS__ -Asystem(unix) -Acpu(sparc) -Amachine(sparc)"
+
+/* We want DBX format for use with gdb under COFF. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Generate calls to memcpy, memcmp and memset. */
+
+#define TARGET_MEM_FUNCTIONS
+
+/* We use /usr/lib/libp/lib* when profiling.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{p:-L/usr/lib/libp} %{pg:-L/usr/lib/libp} -lc"
+
+
+/* Use crt1.o as a startup file and crtn.o as a closing file. */
+/*
+ * The loader directive file gcc.ifile defines how to merge the constructor
+ * sections into the data section. Also, since gas only puts out those
+ * sections in response to N_SETT stabs, and does not (yet) have a
+ * ".sections" directive, gcc.ifile also defines the list symbols
+ * __DTOR_LIST__ and __CTOR_LIST__.
+ *
+ * Finally, we must explicitly specify the file from libgcc.a that defines
+ * exit(), otherwise if the user specifies (for example) "-lc_s" on the
+ * command line, the wrong exit() will be used and global destructors will
+ *  not get called.
+ */
+
+#define STARTFILE_SPEC \
+"%{!r: gcc.ifile%s} %{pg:gcrt1.o%s}%{!pg:%{p:mcrt1.o%s}%{!p:crt1.o%s}} \
+%{!r:_exit.o%s}"
+
+#define ENDFILE_SPEC "crtn.o%s"
+
+/* cpp has to support a #sccs directive for the /usr/include files */
+
+#define SCCS_DIRECTIVE
+
+/* LINK_SPEC is needed only for SunOS 4. */
+
+#undef LINK_SPEC
+
+/* Although the gas we use can create .ctor and .dtor sections from N_SETT
+ stabs, it does not support section directives, so we need to have the loader
+ define the lists.
+ */
+#define CTOR_LISTS_DEFINED_EXTERNALLY
+
+/* Similar to the default, but allows for the table defined by ld with
+   gcc.ifile.  In that table nptrs is always 0, so we instead check that
+   __DTOR_LIST__[1] != 0.  The old check is left in so that the same macro
+   can be used if and when a future version of gas does support section
+   directives.  */
+
+#define DO_GLOBAL_DTORS_BODY {int nptrs = *(int *)__DTOR_LIST__; int i; \
+ if (nptrs == -1 || (__DTOR_LIST__[0] == 0 && __DTOR_LIST__[1] != 0)) \
+ for (nptrs = 0; __DTOR_LIST__[nptrs + 1] != 0; nptrs++); \
+ for (i = nptrs; i >= 1; i--) \
+ __DTOR_LIST__[i] (); }
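+
+/* A self-contained sketch of the same walking logic (illustrative names
+   and list contents; the real list is built by ld from gcc.ifile):
+
+     #include <stdio.h>
+
+     typedef void (*func_ptr) (void);
+
+     static void d1 (void) { puts ("dtor 1"); }
+     static void d2 (void) { puts ("dtor 2"); }
+
+     static func_ptr fake_dtor_list[] = { 0, d1, d2, 0 };
+
+     int
+     main (void)
+     {
+       func_ptr *list = fake_dtor_list;
+       int nptrs = (int) (long) list[0];
+       int i;
+
+       if (nptrs == -1 || (list[0] == 0 && list[1] != 0))
+         for (nptrs = 0; list[nptrs + 1] != 0; nptrs++)
+           ;
+       for (i = nptrs; i >= 1; i--)
+         list[i] ();
+       return 0;
+     }
+
+   This prints "dtor 2" then "dtor 1": destructors run newest-first, just
+   as the macro above runs __DTOR_LIST__ entries from index nptrs down
+   to 1.  */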
+
+/*
+ * Here is an example gcc.ifile. I've tested it on PBD sparc
+ * systems. The NEXT(0x200000) works on just about all 386 and m68k systems,
+ * but can be reduced to any power of 2 that is >= NBPS (0x40000 on a pbd).
+
+ SECTIONS {
+ .text BIND(0x41000200) BLOCK (0x200) :
+ { *(.init) *(.text) vfork = fork; *(.fini) }
+
+ GROUP BIND( NEXT(0x200000) + ADDR(.text) + SIZEOF(.text)):
+ { .data : { __CTOR_LIST__ = . ; . += 4; *(.ctor) . += 4 ;
+ __DTOR_LIST__ = . ; . += 4; *(.dtor) . += 4 ; }
+ .bss : { }
+ }
+ }
+ */
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#undef USER_LABEL_PREFIX
+#define USER_LABEL_PREFIX ""
+
+/* Fixes: internal labels are prefixed with a period.  */
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.%s%d", PREFIX, NUM)
+
+
+/* This is how to output an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".%s%d:\n", PREFIX, NUM)
+
+/* This is how to output an element of a case-vector that is relative. */
+
+#undef ASM_OUTPUT_ADDR_DIFF_ELT
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ fprintf (FILE, "\t.word .L%d-.L%d\n", VALUE, REL)
+
+/* This is how to output an element of a case-vector that is absolute.
+ (The 68000 does not use such vectors,
+ but we must define this macro anyway.) */
+
+#undef ASM_OUTPUT_ADDR_VEC_ELT
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\t.word .L%d\n", VALUE)
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry. */
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(.LP%d),%%o0\n\tcall mcount\n\tor %%lo(.LP%d),%%o0,%%o0\n", \
+ (LABELNO), (LABELNO))
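+
+/* For concreteness: FUNCTION_PROFILER (file, 3) writes
+
+       sethi %hi(.LP3),%o0
+       call mcount
+       or %lo(.LP3),%o0,%o0
+
+   building the address of profiler label .LP3 in %o0 around the call,
+   with the low half filled in from the call's delay slot.  */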
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#undef FUNCTION_BLOCK_PROFILER
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(.LPBX0),%%o0\n\tld [%%lo(.LPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne .LPY%d\n\tnop\n\tcall ___bb_init_func\n\tnop\n.LPY%d:\n", \
+ (LABELNO), (LABELNO))
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#undef BLOCK_PROFILER
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+{ \
+ int blockn = (BLOCKNO); \
+ fprintf (FILE, "\tsethi %%hi(.LPBX2+%d),%%g1\n\tld [%%lo(.LPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(.LPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+ CC_STATUS_INIT; /* We have clobbered %g1. Also %g2. */ \
+}
+/* This is needed for SunOS 4.0, and should not hurt for 3.2
+ versions either. */
+#undef ASM_OUTPUT_SOURCE_LINE
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+ { static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,.LM%d\n.LM%d:\n", \
+ line, sym_lineno, sym_lineno); \
+ sym_lineno += 1; }
+
+#define ASM_INT_OP ".long "
diff --git a/contrib/gcc/config/sparc/rtems.h b/contrib/gcc/config/sparc/rtems.h
new file mode 100644
index 0000000..1ab0a42
--- /dev/null
+++ b/contrib/gcc/config/sparc/rtems.h
@@ -0,0 +1,35 @@
+/* Definitions for rtems targeting a SPARC using a.out.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Joel Sherrill (joel@OARcorp.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* Specify predefined symbols in preprocessor. */
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__GCC_NEW_VARARGS__ -Drtems -D__rtems__ \
+ -Asystem(rtems) -Acpu(sparc) -Amachine(sparc)"
+
+/* Generate calls to memcpy, memcmp and memset. */
+#ifndef TARGET_MEM_FUNCTIONS
+#define TARGET_MEM_FUNCTIONS
+#endif
+
+/* end of sparc/rtems.h */
diff --git a/contrib/gcc/config/sparc/sol2-c1.asm b/contrib/gcc/config/sparc/sol2-c1.asm
new file mode 100644
index 0000000..618d698
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-c1.asm
@@ -0,0 +1,86 @@
+! crt1.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file takes control of the process from the kernel, as specified
+! in section 3 of the SVr4 ABI.
+! This file is the first thing linked into any executable.
+
+ .section ".text"
+ .proc 022
+ .global _start
+
+_start:
+ mov 0, %fp ! Mark bottom frame pointer
+ ld [%sp + 64], %l0 ! argc
+ add %sp, 68, %l1 ! argv
+
+ ! Leave some room for a call. Sun leaves 32 octets (to sit on
+ ! a cache line?) so we do too.
+ sub %sp, 32, %sp
+
+ ! %g1 may contain a function to be registered w/atexit
+ orcc %g0, %g1, %g0
+ be .nope
+ mov %g1, %o0
+ call atexit
+ nop
+.nope:
+ ! Now make sure constructors and destructors are handled.
+ set _fini, %o0
+ call atexit, 1
+ nop
+ call _init, 0
+ nop
+
+ ! We ignore the auxiliary vector; there's no defined way to
+ ! access those data anyway. Instead, go straight to main:
+ mov %l0, %o0 ! argc
+ mov %l1, %o1 ! argv
+ ! Skip argc words past argv, to env:
+ sll %l0, 2, %o2
+ add %o2, 4, %o2
+ add %l1, %o2, %o2 ! env
+ set _environ, %o3
+ st %o2, [%o3] ! *_environ
+ call main, 4
+ nop
+ call exit, 0
+ nop
+ call _exit, 0
+ nop
+ ! We should never get here.
+
+ .type _start,#function
+ .size _start,.-_start
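+
+! Restated in C for clarity (illustrative only): the sll/add sequence
+! above computes env = argv + argc + 1, i.e. one NULL pointer past the
+! argc words of argv (pointers are 4 bytes here).
+!
+!     char **
+!     env_from_argv (int argc, char **argv)
+!     {
+!       return argv + argc + 1;
+!     }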
diff --git a/contrib/gcc/config/sparc/sol2-ci.asm b/contrib/gcc/config/sparc/sol2-ci.asm
new file mode 100644
index 0000000..dd09a34
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-ci.asm
@@ -0,0 +1,60 @@
+! crti.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just makes a stack frame for the contents of the .fini and
+! .init sections.  Users may put any desired instructions in those
+! sections.
+
+! This file is linked in before the Values-Xx.o files and also before
+! crtbegin, with which perhaps it should be merged.
+
+ .file "crti.s"
+
+ .section ".init"
+ .proc 022
+ .global _init
+ .type _init,#function
+ .align 4
+_init:
+ save %sp, -96, %sp
+
+
+ .section ".fini"
+ .proc 022
+ .global _fini
+ .type _fini,#function
+ .align 4
+_fini:
+ save %sp, -96, %sp
diff --git a/contrib/gcc/config/sparc/sol2-cn.asm b/contrib/gcc/config/sparc/sol2-cn.asm
new file mode 100644
index 0000000..3c5d508
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-cn.asm
@@ -0,0 +1,54 @@
+! crtn.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file just makes sure that the .fini and .init sections do in
+! fact return. Users may put any desired instructions in those sections.
+! This file is the last thing linked into any executable.
+
+ .file "crtn.s"
+
+ .section ".init"
+ .align 4
+
+ ret
+ restore
+
+ .section ".fini"
+ .align 4
+
+ ret
+ restore
+
+! Th-th-th-that's all folks!
diff --git a/contrib/gcc/config/sparc/sol2-g1.asm b/contrib/gcc/config/sparc/sol2-g1.asm
new file mode 100644
index 0000000..b9d8788
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-g1.asm
@@ -0,0 +1,88 @@
+! gcrt1.s for solaris 2.0.
+
+! Copyright (C) 1992 Free Software Foundation, Inc.
+! Written By David Vinayak Henkel-Wallace, June 1992
+!
+! This file is free software; you can redistribute it and/or modify it
+! under the terms of the GNU General Public License as published by the
+! Free Software Foundation; either version 2, or (at your option) any
+! later version.
+!
+! In addition to the permissions in the GNU General Public License, the
+! Free Software Foundation gives you unlimited permission to link the
+! compiled version of this file with other programs, and to distribute
+! those programs without any restriction coming from the use of this
+! file. (The General Public License restrictions do apply in other
+! respects; for example, they cover modification of the file, and
+! distribution when not linked into another program.)
+!
+! This file is distributed in the hope that it will be useful, but
+! WITHOUT ANY WARRANTY; without even the implied warranty of
+! MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+! General Public License for more details.
+!
+! You should have received a copy of the GNU General Public License
+! along with this program; see the file COPYING. If not, write to
+! the Free Software Foundation, 59 Temple Place - Suite 330,
+! Boston, MA 02111-1307, USA.
+!
+! As a special exception, if you link this library with files
+! compiled with GCC to produce an executable, this does not cause
+! the resulting executable to be covered by the GNU General Public License.
+! This exception does not however invalidate any other reasons why
+! the executable file might be covered by the GNU General Public License.
+!
+
+! This file takes control of the process from the kernel, as specified
+! in section 3 of the SVr4 ABI.
+! This file is the first thing linked into any executable.
+
+ .section ".text"
+ .proc 022
+ .global _start
+
+_start:
+ mov 0, %fp ! Mark bottom frame pointer
+ ld [%sp + 64], %l0 ! argc
+ add %sp, 68, %l1 ! argv
+
+ ! Leave some room for a call. Sun leaves 32 octets (to sit on
+ ! a cache line?) so we do too.
+ sub %sp, 32, %sp
+
+ ! %g1 may contain a function to be registered w/atexit
+ orcc %g0, %g1, %g0
+ be .nope
+ mov %g1, %o0
+ call atexit
+ nop
+.nope:
+ ! Now make sure constructors and destructors are handled.
+ set _fini, %o0
+ call atexit, 1
+ nop
+ call _init, 0
+ nop
+
+ ! We ignore the auxiliary vector; there's no defined way to
+ ! access those data anyway. Instead, go straight to main:
+ mov %l0, %o0 ! argc
+ mov %l1, %o1 ! argv
+ set ___Argv, %o3
+ st %o1, [%o3] ! *___Argv
+ ! Skip argc words past argv, to env:
+ sll %l0, 2, %o2
+ add %o2, 4, %o2
+ add %l1, %o2, %o2 ! env
+ set _environ, %o3
+ st %o2, [%o3] ! *_environ
+ call main, 4
+ nop
+ call exit, 0
+ nop
+ call _exit, 0
+ nop
+ ! We should never get here.
+
+ .type _start,#function
+ .size _start,.-_start
diff --git a/contrib/gcc/config/sparc/sol2-sld.h b/contrib/gcc/config/sparc/sol2-sld.h
new file mode 100644
index 0000000..a824987
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2-sld.h
@@ -0,0 +1,11 @@
+/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
+ using the system linker. */
+
+#include "sparc/sol2.h"
+
+/* At least up through Solaris 2.6,
+ the system linker does not work with DWARF or DWARF2,
+ since it does not have working support for relocations
+ to unaligned data. */
+
+#define LINKER_DOES_NOT_WORK_WITH_DWARF2
diff --git a/contrib/gcc/config/sparc/sol2.h b/contrib/gcc/config/sparc/sol2.h
new file mode 100644
index 0000000..a0fa4a8
--- /dev/null
+++ b/contrib/gcc/config/sparc/sol2.h
@@ -0,0 +1,232 @@
+/* Definitions of target machine for GNU compiler, for SPARC running Solaris 2
+ Copyright 1992, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@netcom.com).
+ Additional changes by David V. Henkel-Wallace (gumby@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Supposedly the same as vanilla sparc svr4, except for the stuff below: */
+#include "sparc/sysv4.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+"-Dsparc -Dsun -Dunix -D__svr4__ -D__SVR4 \
+-Asystem(unix) -Asystem(svr4)"
+
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "\
+%{pthreads:-D_REENTRANT -D_PTHREADS} \
+%{!pthreads:%{threads:-D_REENTRANT -D_SOLARIS_THREADS}} \
+%{compat-bsd:-iwithprefixbefore ucbinclude -I/usr/ucbinclude} \
+"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+%{fpic:-K PIC} %{fPIC:-K PIC} \
+%(asm_cpu) \
+"
+
+/* This is here rather than in sparc.h because it's not known what
+ other assemblers will accept. */
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-xarch=v8plus"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+#undef ASM_CPU_DEFAULT_SPEC
+#define ASM_CPU_DEFAULT_SPEC "-xarch=v8plusa"
+#endif
+#undef ASM_CPU_SPEC
+#define ASM_CPU_SPEC "\
+%{mcpu=v8plus:-xarch=v8plus} \
+%{mcpu=ultrasparc:-xarch=v8plusa} \
+%{!mcpu*:%(asm_cpu_default)} \
+"
+
+/* However, it appears that Solaris 2.0 uses the same reg numbering as
+   the old BSD-style system did.  */
+
+#undef DBX_REGISTER_NUMBER
+/* Same as sparc.h */
+#define DBX_REGISTER_NUMBER(REGNO) \
+ (TARGET_FLAT && REGNO == FRAME_POINTER_REGNUM ? 31 : REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* The Solaris 2 assembler uses .skip, not .zero, so put this back. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+/* Use .uahalf/.uaword so packed structure members don't generate
+ assembler errors when using the native assembler. */
+#undef ASM_SHORT
+#define ASM_SHORT ".uahalf"
+#undef ASM_LONG
+#define ASM_LONG ".uaword"
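+
+/* A minimal example of the situation the comment above describes
+   (assumed layout; the packed attribute is a GNU C extension):
+
+     struct unaligned
+     {
+       char c;
+       int w;
+     } __attribute__ ((packed));
+
+     struct unaligned u = { 'x', 42 };
+
+   Here the initializer for w lands at offset 1 in the data section,
+   which .uaword accepts where an aligned word directive would not.  */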
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*.L%s%ld", (PREFIX), (long)(NUM))
+
+
+/* We don't use the standard svr4 STARTFILE_SPEC because it's wrong for us.
+   We don't use the standard LIB_SPEC only because we don't yet support c++.  */
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{!shared: \
+ %{!symbolic: \
+ %{p:mcrt1.o%s} \
+ %{!p: \
+ %{pg:gcrt1.o%s gmon.o%s} \
+ %{!pg:crt1.o%s}}}} \
+ crti.o%s \
+ %{ansi:values-Xc.o%s} \
+ %{!ansi: \
+ %{traditional:values-Xt.o%s} \
+ %{!traditional:values-Xa.o%s}} \
+ crtbegin.o%s"
+
+/* ??? Note: in order for -compat-bsd to work fully,
+   we must somehow arrange to run fixincludes on /usr/ucbinclude
+   and put the result in $(libsubdir)/ucbinclude.  */
+
+#undef LIB_SPEC
+#define LIB_SPEC \
+ "%{compat-bsd:-lucb -lsocket -lnsl -lelf -laio} \
+ %{!shared:\
+ %{!symbolic:\
+ %{pthreads:-lpthread} \
+ %{!pthreads:%{threads:-lthread}} \
+ -lc}}"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* This should be the same as in svr4.h, except with -R added. */
+#undef LINK_SPEC
+#define LINK_SPEC \
+ "%{h*} %{v:-V} \
+ %{b} %{Wl,*:%*} \
+ %{static:-dn -Bstatic} \
+ %{shared:-G -dy %{!mimpure-text:-z text}} \
+ %{symbolic:-Bsymbolic -G -dy -z text} \
+ %{G:-G} \
+ %{YP,*} \
+ %{R*} \
+ %{compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ucblib:/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ucblib:/usr/ccs/lib:/usr/lib}}} \
+ -R /usr/ucblib} \
+ %{!compat-bsd: \
+ %{!YP,*:%{p:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{pg:-Y P,/usr/ccs/lib/libp:/usr/lib/libp:/usr/ccs/lib:/usr/lib} \
+ %{!p:%{!pg:-Y P,/usr/ccs/lib:/usr/lib}}}} \
+ %{Qy:} %{!Qn:-Qy}"
+
+/* This defines which switch letters take arguments.
+ It is as in svr4.h but with -R added. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ (DEFAULT_SWITCH_TAKES_ARG(CHAR) \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'x' \
+ || (CHAR) == 'z')
+
+/* ??? This does not work in SunOS 4.x, so it is not enabled in sparc.h.
+ Instead, it is enabled here, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* But indicate that it isn't supported by the hardware. */
+#define WIDEST_HARDWARE_FP_SIZE 64
+
+#define STDC_0_IN_SYSTEM_HEADERS
+
+#define MULDI3_LIBCALL "__mul64"
+#define DIVDI3_LIBCALL "__div64"
+#define UDIVDI3_LIBCALL "__udiv64"
+#define MODDI3_LIBCALL "__rem64"
+#define UMODDI3_LIBCALL "__urem64"
+
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS \
+ fixsfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ftoll"); \
+ fixunssfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__ftoull"); \
+ fixdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__dtoll"); \
+ fixunsdfdi_libfunc = gen_rtx_SYMBOL_REF (Pmode, "__dtoull")
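+
+/* Where the renamed libcalls show up (a hedged example; exact code
+   depends on optimization): 64-bit division has no v8 instruction, so
+
+     long long
+     div64 (long long a, long long b)
+     {
+       return a / b;
+     }
+
+   compiles to a call to __div64, the DIVDI3_LIBCALL named above.  */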
+
+/* No weird SPARC variants on Solaris */
+#undef TARGET_LIVE_G0
+#define TARGET_LIVE_G0 0
+#undef TARGET_BROKEN_SAVERESTORE
+#define TARGET_BROKEN_SAVERESTORE 0
+
+/* Solaris allows 64 bit out and global registers in 32 bit mode.
+ sparc_override_options will disable V8+ if not generating V9 code. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_V8PLUS)
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
+
diff --git a/contrib/gcc/config/sparc/sp64-aout.h b/contrib/gcc/config/sparc/sp64-aout.h
new file mode 100644
index 0000000..e3056df
--- /dev/null
+++ b/contrib/gcc/config/sparc/sp64-aout.h
@@ -0,0 +1,38 @@
+/* Definitions of target machine for GNU compiler, for SPARC64, a.out.
+ Copyright (C) 1994, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans, dje@cygnus.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+#include "aoutos.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64-aout)")
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+ (MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
+ + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
+
+/* The only code model supported is Medium/Low. */
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_MEDLOW
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
diff --git a/contrib/gcc/config/sparc/sp64-elf.h b/contrib/gcc/config/sparc/sp64-elf.h
new file mode 100644
index 0000000..2482866
--- /dev/null
+++ b/contrib/gcc/config/sparc/sp64-elf.h
@@ -0,0 +1,157 @@
+/* Definitions of target machine for GNU compiler, for SPARC64, ELF.
+ Copyright (C) 1994, 1995, 1996, 1997, 1998 Free Software Foundation, Inc.
+ Contributed by Doug Evans, dje@cygnus.com.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* ??? We're taking the scheme of including another file and then overriding
+ the values we don't like a bit too far here. The alternative is to more or
+ less duplicate all of svr4.h, sparc/sysv4.h, and sparc/sol2.h here
+ (suitably cleaned up). */
+
+#include "sparc/sol2.h"
+
+#undef TARGET_VERSION
+#define TARGET_VERSION fprintf (stderr, " (sparc64-elf)")
+
+/* A 64 bit v9 compiler in a Medium/Anywhere code model environment. */
+
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT \
+(MASK_V9 + MASK_PTR64 + MASK_64BIT + MASK_HARD_QUAD \
+ + MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU + MASK_STACK_BIAS)
+
+#undef SPARC_DEFAULT_CMODEL
+#define SPARC_DEFAULT_CMODEL CM_EMBMEDANY
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -D__ELF__ -Acpu(sparc) -Amachine(sparc)"
+
+/* __svr4__ is used by the C library (FIXME) */
+#undef CPP_SUBTARGET_SPEC
+#define CPP_SUBTARGET_SPEC "-D__svr4__"
+
+#undef MD_EXEC_PREFIX
+#undef MD_STARTFILE_PREFIX
+
+#undef ASM_SPEC
+#define ASM_SPEC "\
+%{v:-V} -s %{fpic:-K PIC} %{fPIC:-K PIC} \
+%{mlittle-endian:-EL} \
+%(asm_cpu) %(asm_arch) \
+"
+
+/* This is taken from sol2.h. */
+#undef LINK_SPEC
+#define LINK_SPEC "\
+%{v:-V} \
+%{mlittle-endian:-EL} \
+"
+
+/* We need something a little simpler for the embedded environment.
+ Profiling doesn't really work yet so we just copy the default. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "\
+%{!shared:%{pg:gcrt0.o%s}%{!pg:%{p:mcrt0.o%s}%{!p:crt0.o%s}}} \
+crtbegin.o%s \
+"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s"
+
+/* Use the default (for now). */
+#undef LIB_SPEC
+
+/* V9 chips can handle either endianness. */
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{"big-endian", -MASK_LITTLE_ENDIAN}, \
+{"little-endian", MASK_LITTLE_ENDIAN},
+
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+
+#undef WORDS_BIG_ENDIAN
+#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+
+/* ??? This should be 32 bits for v9 but what can we do? */
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "short unsigned int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE 16
+
+#undef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE 128
+
+/* The medium/anywhere code model practically requires us to put jump tables
+ in the text section as gcc is unable to distinguish LABEL_REF's of jump
+ tables from other label refs (when we need to). */
+/* ??? Revisit this. */
+#undef JUMP_TABLES_IN_TEXT_SECTION
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* System V Release 4 uses DWARF debugging info.
+ GDB doesn't support 64 bit stabs yet and the desired debug format is DWARF
+ anyway so it is the default. */
+
+#define DWARF_DEBUGGING_INFO
+#define DWARF2_DEBUGGING_INFO
+#define DBX_DEBUGGING_INFO
+
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+
+/* Stabs doesn't use this, and it confuses a simulator. */
+/* ??? Need to see what DWARF needs, if anything. */
+#undef ASM_IDENTIFY_GCC
+#define ASM_IDENTIFY_GCC(FILE)
+
+/* Define the names of various pseudo-ops used by the Sparc/svr4 assembler.
+   ??? If ints are 64 bits then UNALIGNED_INT_ASM_OP (defined elsewhere) is
+   misnamed.  These should all refer to explicit sizes (half/word/xword?)
+   rather than to short/int/long/etc.  */
+
+#define UNALIGNED_LONGLONG_ASM_OP ".uaxword"
+
+/* DWARF stuff. */
+
+#define ASM_OUTPUT_DWARF_ADDR(FILE, LABEL) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_LONGLONG_ASM_OP); \
+ assemble_name ((FILE), (LABEL)); \
+ fprintf ((FILE), "\n"); \
+} while (0)
+
+#define ASM_OUTPUT_DWARF_ADDR_CONST(FILE, RTX) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_LONGLONG_ASM_OP); \
+ output_addr_const ((FILE), (RTX)); \
+ fputc ('\n', (FILE)); \
+} while (0)
+
+#define ASM_OUTPUT_DWARF2_ADDR_CONST(FILE, ADDR) \
+ fprintf ((FILE), "\t%s\t%s", UNALIGNED_LONGLONG_ASM_OP, (ADDR))
+
+/* ??? Not sure if this should be 4 or 8 bytes. 4 works for now. */
+#define ASM_OUTPUT_DWARF_REF(FILE, LABEL) \
+do { \
+ fprintf ((FILE), "\t%s\t", UNALIGNED_INT_ASM_OP); \
+ assemble_name ((FILE), (LABEL)); \
+ fprintf ((FILE), "\n"); \
+} while (0)
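+
+/* Illustration (hypothetical label name): ASM_OUTPUT_DWARF_ADDR with
+   LABEL ".LD1" emits
+
+       .uaxword        .LD1
+
+   a 64-bit, unaligned-safe address reference, matching the xword
+   pseudo-op discussion above.  */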
diff --git a/contrib/gcc/config/sparc/sparc.c b/contrib/gcc/config/sparc/sparc.c
new file mode 100644
index 0000000..e350729
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.c
@@ -0,0 +1,6461 @@
+/* Subroutines for insn-output.c for Sun SPARC.
+ Copyright (C) 1987, 88, 89, 92-97, 1998 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com)
+ 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+ at Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "config.h"
+#include "system.h"
+#include "tree.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "real.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "expr.h"
+#include "recog.h"
+#include "toplev.h"
+
+/* 1 if the caller has placed an "unimp" insn immediately after the call.
+ This is used in v8 code when calling a function that returns a structure.
+ v9 doesn't have this. Be careful to have this test be the same as that
+ used on the call. */
+
+#define SKIP_CALLERS_UNIMP_P \
+(!TARGET_ARCH64 && current_function_returns_struct \
+ && ! integer_zerop (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+ && (TREE_CODE (DECL_SIZE (DECL_RESULT (current_function_decl))) \
+ == INTEGER_CST))
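+
+/* Illustration of the v8 convention the macro refers to (a sketch; see
+   the SPARC V8 ABI for the normative description).  A caller expecting a
+   structure value plants an unimp word holding the structure size right
+   after the call's delay slot:
+
+       call  returns_struct
+        nop                     ! delay slot
+       unimp 12                 ! structure size in bytes
+
+   A callee that really returns a structure comes back past the unimp
+   (to %i7+12 rather than %i7+8), which is why this test must match the
+   test used when the call was emitted.  */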
+
+/* Global variables for machine-dependent things. */
+
+/* Size of frame. Need to know this to emit return insns from leaf procedures.
+ ACTUAL_FSIZE is set by compute_frame_size() which is called during the
+ reload pass. This is important as the value is later used in insn
+ scheduling (to see what can go in a delay slot).
+ APPARENT_FSIZE is the size of the stack less the register save area and less
+ the outgoing argument area. It is used when saving call preserved regs. */
+static int apparent_fsize;
+static int actual_fsize;
+
+/* Save the operands last given to a compare for use when we
+ generate a scc or bcc insn. */
+
+rtx sparc_compare_op0, sparc_compare_op1;
+
+/* We may need an epilogue if we spill too many registers.
+ If this is non-zero, then we branch here for the epilogue. */
+static rtx leaf_label;
+
+#ifdef LEAF_REGISTERS
+
+/* Vector to say how input registers are mapped to output
+ registers. FRAME_POINTER_REGNUM cannot be remapped by
+ this function to eliminate it. You must use -fomit-frame-pointer
+ to get that. */
+char leaf_reg_remap[] =
+{ 0, 1, 2, 3, 4, 5, 6, 7,
+ -1, -1, -1, -1, -1, -1, 14, -1,
+ -1, -1, -1, -1, -1, -1, -1, -1,
+ 8, 9, 10, 11, 12, 13, -1, 15,
+
+ 32, 33, 34, 35, 36, 37, 38, 39,
+ 40, 41, 42, 43, 44, 45, 46, 47,
+ 48, 49, 50, 51, 52, 53, 54, 55,
+ 56, 57, 58, 59, 60, 61, 62, 63,
+ 64, 65, 66, 67, 68, 69, 70, 71,
+ 72, 73, 74, 75, 76, 77, 78, 79,
+ 80, 81, 82, 83, 84, 85, 86, 87,
+ 88, 89, 90, 91, 92, 93, 94, 95,
+ 96, 97, 98, 99, 100};
+
+#endif
+
+/* Name of where we pretend to think the frame pointer points.
+ Normally, this is "%fp", but if we are in a leaf procedure,
+ this is "%sp+something". We record "something" separately as it may be
+ too big for reg+constant addressing. */
+
+static char *frame_base_name;
+static int frame_base_offset;
+
+static rtx pic_setup_code PROTO((void));
+static rtx find_addr_reg PROTO((rtx));
+static void sparc_init_modes PROTO((void));
+static int save_regs PROTO((FILE *, int, int, char *,
+ int, int, int));
+static int restore_regs PROTO((FILE *, int, int, char *, int, int));
+static void build_big_number PROTO((FILE *, int, char *));
+static int function_arg_slotno PROTO((const CUMULATIVE_ARGS *,
+ enum machine_mode, tree, int, int,
+ int *, int *));
+
+#ifdef DWARF2_DEBUGGING_INFO
+extern char *dwarf2out_cfi_label ();
+#endif
+
+/* Option handling. */
+
+/* Code model option as passed by user. */
+char *sparc_cmodel_string;
+/* Parsed value. */
+enum cmodel sparc_cmodel;
+
+/* Record alignment options as passed by user. */
+char *sparc_align_loops_string;
+char *sparc_align_jumps_string;
+char *sparc_align_funcs_string;
+
+/* Parsed values, as a power of two. */
+int sparc_align_loops;
+int sparc_align_jumps;
+int sparc_align_funcs;
+
+struct sparc_cpu_select sparc_select[] =
+{
+ /* switch name, tune arch */
+ { (char *)0, "default", 1, 1 },
+ { (char *)0, "-mcpu=", 1, 1 },
+ { (char *)0, "-mtune=", 1, 0 },
+ { 0, 0, 0, 0 }
+};
+
+/* CPU type. This is set from TARGET_CPU_DEFAULT and -m{cpu,tune}=xxx. */
+enum processor_type sparc_cpu;
+
+/* Validate and override various options, and do some machine dependent
+ initialization. */
+
+void
+sparc_override_options ()
+{
+ static struct code_model {
+ char *name;
+ int value;
+ } cmodels[] = {
+ { "32", CM_32 },
+ { "medlow", CM_MEDLOW },
+ { "medmid", CM_MEDMID },
+ { "medany", CM_MEDANY },
+ { "embmedany", CM_EMBMEDANY },
+ { 0, 0 }
+ };
+ struct code_model *cmodel;
+ /* Map TARGET_CPU_DEFAULT to value for -m{arch,tune}=. */
+ static struct cpu_default {
+ int cpu;
+ char *name;
+ } cpu_default[] = {
+ /* There must be one entry here for each TARGET_CPU value. */
+ { TARGET_CPU_sparc, "cypress" },
+ { TARGET_CPU_sparclet, "tsc701" },
+ { TARGET_CPU_sparclite, "f930" },
+ { TARGET_CPU_v8, "v8" },
+ { TARGET_CPU_supersparc, "supersparc" },
+ { TARGET_CPU_v9, "v9" },
+ { TARGET_CPU_ultrasparc, "ultrasparc" },
+ { 0, 0 }
+ };
+ struct cpu_default *def;
+ /* Table of values for -m{cpu,tune}=. */
+ static struct cpu_table {
+ char *name;
+ enum processor_type processor;
+ int disable;
+ int enable;
+ } cpu_table[] = {
+ { "v7", PROCESSOR_V7, MASK_ISA, 0 },
+ { "cypress", PROCESSOR_CYPRESS, MASK_ISA, 0 },
+ { "v8", PROCESSOR_V8, MASK_ISA, MASK_V8 },
+ /* TI TMS390Z55 supersparc */
+ { "supersparc", PROCESSOR_SUPERSPARC, MASK_ISA, MASK_V8 },
+ { "sparclite", PROCESSOR_SPARCLITE, MASK_ISA, MASK_SPARCLITE },
+ /* The Fujitsu MB86930 is the original sparclite chip, with no fpu.
+ The Fujitsu MB86934 is the recent sparclite chip, with an fpu. */
+ { "f930", PROCESSOR_F930, MASK_ISA|MASK_FPU, MASK_SPARCLITE },
+ { "f934", PROCESSOR_F934, MASK_ISA, MASK_SPARCLITE|MASK_FPU },
+ { "sparclet", PROCESSOR_SPARCLET, MASK_ISA, MASK_SPARCLET },
+ /* TEMIC sparclet */
+ { "tsc701", PROCESSOR_TSC701, MASK_ISA, MASK_SPARCLET },
+ { "v9", PROCESSOR_V9, MASK_ISA, MASK_V9 },
+ /* TI ultrasparc */
+ { "ultrasparc", PROCESSOR_ULTRASPARC, MASK_ISA, MASK_V9 },
+ { 0, 0, 0, 0 }
+ };
+ struct cpu_table *cpu;
+ struct sparc_cpu_select *sel;
+ int fpu;
+
+#ifndef SPARC_BI_ARCH
+ /* Check for unsupported architecture size. */
+ if (! TARGET_64BIT != DEFAULT_ARCH32_P)
+ {
+ error ("%s is not supported by this configuration",
+ DEFAULT_ARCH32_P ? "-m64" : "-m32");
+ }
+#endif
+
+ /* Code model selection. */
+ sparc_cmodel = SPARC_DEFAULT_CMODEL;
+ if (sparc_cmodel_string != NULL)
+ {
+ if (TARGET_ARCH64)
+ {
+ for (cmodel = &cmodels[0]; cmodel->name; cmodel++)
+ if (strcmp (sparc_cmodel_string, cmodel->name) == 0)
+ break;
+ if (cmodel->name == NULL)
+ error ("bad value (%s) for -mcmodel= switch", sparc_cmodel_string);
+ else
+ sparc_cmodel = cmodel->value;
+ }
+ else
+ error ("-mcmodel= is not supported on 32 bit systems");
+ }
+
+ fpu = TARGET_FPU; /* save current -mfpu status */
+
+ /* Set the default CPU. */
+ for (def = &cpu_default[0]; def->name; ++def)
+ if (def->cpu == TARGET_CPU_DEFAULT)
+ break;
+ if (! def->name)
+ abort ();
+ sparc_select[0].string = def->name;
+
+ for (sel = &sparc_select[0]; sel->name; ++sel)
+ {
+ if (sel->string)
+ {
+ for (cpu = &cpu_table[0]; cpu->name; ++cpu)
+ if (! strcmp (sel->string, cpu->name))
+ {
+ if (sel->set_tune_p)
+ sparc_cpu = cpu->processor;
+
+ if (sel->set_arch_p)
+ {
+ target_flags &= ~cpu->disable;
+ target_flags |= cpu->enable;
+ }
+ break;
+ }
+
+ if (! cpu->name)
+ error ("bad value (%s) for %s switch", sel->string, sel->name);
+ }
+ }
+
+ /* If -mfpu or -mno-fpu was explicitly used, don't override with
+ the processor default. */
+ if (TARGET_FPU_SET)
+ target_flags = (target_flags & ~MASK_FPU) | fpu;
+
+ /* Use the deprecated v8 insns for sparc64 in 32 bit mode. */
+ if (TARGET_V9 && TARGET_ARCH32)
+ target_flags |= MASK_DEPRECATED_V8_INSNS;
+
+ /* V8PLUS requires V9 */
+ if (! TARGET_V9)
+ target_flags &= ~MASK_V8PLUS;
+
+ /* Don't use stack biasing in 32 bit mode. */
+ if (TARGET_ARCH32)
+ target_flags &= ~MASK_STACK_BIAS;
+
+ /* Validate -malign-loops= value, or provide default. */
+ if (sparc_align_loops_string)
+ {
+ sparc_align_loops = exact_log2 (atoi (sparc_align_loops_string));
+ if (sparc_align_loops < 2 || sparc_align_loops > 7)
+ fatal ("-malign-loops=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_loops_string);
+ }
+ else
+ {
+ /* ??? This relies on ASM_OUTPUT_ALIGN to not emit the alignment if
+	 it's 0.  This sounds a bit kludgey.  */
+ sparc_align_loops = 0;
+ }
+
+ /* Validate -malign-jumps= value, or provide default. */
+ if (sparc_align_jumps_string)
+ {
+ sparc_align_jumps = exact_log2 (atoi (sparc_align_jumps_string));
+      if (sparc_align_jumps < 2 || sparc_align_jumps > 7)
+ fatal ("-malign-jumps=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_jumps_string);
+ }
+ else
+ {
+ /* ??? This relies on ASM_OUTPUT_ALIGN to not emit the alignment if
+	 it's 0.  This sounds a bit kludgey.  */
+ sparc_align_jumps = 0;
+ }
+
+ /* Validate -malign-functions= value, or provide default. */
+ if (sparc_align_funcs_string)
+ {
+ sparc_align_funcs = exact_log2 (atoi (sparc_align_funcs_string));
+      if (sparc_align_funcs < 2 || sparc_align_funcs > 7)
+ fatal ("-malign-functions=%s is not between 4 and 128 or is not a power of two",
+ sparc_align_funcs_string);
+ }
+ else
+ sparc_align_funcs = DEFAULT_SPARC_ALIGN_FUNCS;
+
+ /* Validate PCC_STRUCT_RETURN. */
+ if (flag_pcc_struct_return == DEFAULT_PCC_STRUCT_RETURN)
+ flag_pcc_struct_return = (TARGET_ARCH64 ? 0 : 1);
+
+ /* Do various machine dependent initializations. */
+ sparc_init_modes ();
+}
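+
+/* For example (following the tables above): "-mcpu=ultrasparc" is
+   recorded in a sparc_select entry with both set_tune_p and
+   set_arch_p, so it selects PROCESSOR_ULTRASPARC for scheduling and
+   ors MASK_V9 into target_flags, while "-mtune=ultrasparc" has
+   set_arch_p clear and affects scheduling only.  */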
+
+/* Miscellaneous utilities. */
+
+/* Nonzero if CODE, a comparison, is suitable for use in v9 conditional move
+ or branch on register contents instructions. */
+
+int
+v9_regcmp_p (code)
+ enum rtx_code code;
+{
+ return (code == EQ || code == NE || code == GE || code == LT
+ || code == LE || code == GT);
+}
+
+/* 32 bit registers are zero extended so only zero/non-zero comparisons
+ work. */
+int
+v8plus_regcmp_p (code)
+ enum rtx_code code;
+{
+ return (code == EQ || code == NE);
+}
+
+/* Operand constraints. */
+
+/* Return non-zero only if OP is a register of mode MODE,
+ or const0_rtx. Don't allow const0_rtx if TARGET_LIVE_G0 because
+ %g0 may contain anything. */
+
+int
+reg_or_0_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+ if (TARGET_LIVE_G0)
+ return 0;
+ if (op == const0_rtx)
+ return 1;
+ if (GET_MODE (op) == VOIDmode && GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (op) == 0
+ && CONST_DOUBLE_LOW (op) == 0)
+ return 1;
+ if (GET_MODE_CLASS (GET_MODE (op)) == MODE_FLOAT
+ && GET_CODE (op) == CONST_DOUBLE
+ && fp_zero_operand (op))
+ return 1;
+ return 0;
+}
+
+/* Nonzero if OP is a floating point value with value 0.0. */
+
+int
+fp_zero_operand (op)
+ rtx op;
+{
+ REAL_VALUE_TYPE r;
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op);
+ return (REAL_VALUES_EQUAL (r, dconst0) && ! REAL_VALUE_MINUS_ZERO (r));
+}
+
+/* Nonzero if OP is an integer register. */
+
+int
+intreg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (register_operand (op, SImode)
+ || (TARGET_ARCH64 && register_operand (op, DImode)));
+}
+
+/* Nonzero if OP is a floating point condition code register. */
+
+int
+fcc_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ /* This can happen when recog is called from combine. Op may be a MEM.
+ Fail instead of calling abort in this case. */
+ if (GET_CODE (op) != REG)
+ return 0;
+
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+ if (mode == VOIDmode
+ && (GET_MODE (op) != CCFPmode && GET_MODE (op) != CCFPEmode))
+ return 0;
+
+#if 0 /* ??? ==> 1 when %fcc0-3 are pseudos first. See gen_compare_reg(). */
+ if (reg_renumber == 0)
+ return REGNO (op) >= FIRST_PSEUDO_REGISTER;
+ return REGNO_OK_FOR_CCFP_P (REGNO (op));
+#else
+ return (unsigned) REGNO (op) - SPARC_FIRST_V9_FCC_REG < 4;
+#endif
+}
+
+/* Nonzero if OP is an integer or floating point condition code register. */
+
+int
+icc_or_fcc_reg_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) == REG && REGNO (op) == SPARC_ICC_REG)
+ {
+ if (mode != VOIDmode && mode != GET_MODE (op))
+ return 0;
+ if (mode == VOIDmode
+ && GET_MODE (op) != CCmode && GET_MODE (op) != CCXmode)
+ return 0;
+ return 1;
+ }
+
+ return fcc_reg_operand (op, mode);
+}
+
+/* Nonzero if OP can appear as the dest of a RESTORE insn. */
+int
+restore_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (GET_CODE (op) == REG && GET_MODE (op) == mode
+ && (REGNO (op) < 8 || (REGNO (op) >= 24 && REGNO (op) < 32)));
+}
+
+/* Call insn on SPARC can take a PC-relative constant address, or any regular
+ memory address. */
+
+int
+call_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != MEM)
+ abort ();
+ op = XEXP (op, 0);
+ return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
+}
+
+int
+call_operand_address (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (symbolic_operand (op, mode) || memory_address_p (Pmode, op));
+}
+
+/* Returns 1 if OP is either a symbol reference or a sum of a symbol
+ reference and a constant. */
+
+int
+symbolic_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case CONST:
+ op = XEXP (op, 0);
+ return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ && GET_CODE (XEXP (op, 1)) == CONST_INT);
+
+ /* ??? This clause seems to be irrelevant. */
+ case CONST_DOUBLE:
+ return GET_MODE (op) == mode;
+
+ default:
+ return 0;
+ }
+}
+
+/* Return truth value of statement that OP is a symbolic memory
+ operand of mode MODE. */
+
+int
+symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == CONST
+ || GET_CODE (op) == HIGH || GET_CODE (op) == LABEL_REF);
+}
+
+/* Return truth value of statement that OP is a LABEL_REF of mode MODE. */
+
+int
+label_ref_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (GET_CODE (op) != LABEL_REF)
+ return 0;
+ if (GET_MODE (op) != mode)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if the operand is an argument used in generating pic references
+ in either the medium/low or medium/anywhere code models of sparc64. */
+
+int
+sp64_medium_pic_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ /* Check for (const (minus (symbol_ref:GOT)
+ (const (minus (label) (pc))))). */
+ if (GET_CODE (op) != CONST)
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != MINUS)
+ return 0;
+ if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
+ return 0;
+ /* ??? Ensure symbol is GOT. */
+ if (GET_CODE (XEXP (op, 1)) != CONST)
+ return 0;
+ if (GET_CODE (XEXP (XEXP (op, 1), 0)) != MINUS)
+ return 0;
+ return 1;
+}
+
+/* Return 1 if the operand is a data segment reference. This includes
+ the readonly data segment, or in other words anything but the text segment.
+ This is needed in the medium/anywhere code model on v9. These values
+ are accessed with EMBMEDANY_BASE_REG. */
+
+int
+data_segment_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case SYMBOL_REF :
+ return ! SYMBOL_REF_FLAG (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+      return data_segment_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return 1 if the operand is a text segment reference.
+ This is needed in the medium/anywhere code model on v9. */
+
+int
+text_segment_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ switch (GET_CODE (op))
+ {
+ case LABEL_REF :
+ return 1;
+ case SYMBOL_REF :
+ return SYMBOL_REF_FLAG (op);
+ case PLUS :
+ /* Assume canonical format of symbol + constant.
+ Fall through. */
+ case CONST :
+      return text_segment_operand (XEXP (op, 0), mode);
+ default :
+ return 0;
+ }
+}
+
+/* Return 1 if the operand is either a register or a memory operand that is
+ not symbolic. */
+
+int
+reg_or_nonsymb_mem_operand (op, mode)
+ register rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode))
+ return 1;
+
+ if (memory_operand (op, mode) && ! symbolic_memory_operand (op, mode))
+ return 1;
+
+ return 0;
+}
+
+int
+sparc_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) == CONST_INT)
+ return SMALL_INT (op);
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == LO_SUM)
+ return (GET_CODE (XEXP (op, 0)) == REG
+ && symbolic_operand (XEXP (op, 1), Pmode));
+ return memory_address_p (mode, op);
+}
+
+int
+move_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ if (mode == DImode && arith_double_operand (op, mode))
+ return 1;
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) == CONST_INT)
+ return SMALL_INT (op) || SPARC_SETHI_P (INTVAL (op));
+
+ if (GET_MODE (op) != mode)
+ return 0;
+ if (GET_CODE (op) == SUBREG)
+ op = SUBREG_REG (op);
+ if (GET_CODE (op) != MEM)
+ return 0;
+ op = XEXP (op, 0);
+ if (GET_CODE (op) == LO_SUM)
+ return (register_operand (XEXP (op, 0), Pmode)
+ && CONSTANT_P (XEXP (op, 1)));
+ return memory_address_p (mode, op);
+}
+
+int
+splittable_symbolic_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+ if (! symbolic_operand (XEXP (op, 0), Pmode))
+ return 0;
+ return 1;
+}
+
+int
+splittable_immediate_memory_operand (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) != MEM)
+ return 0;
+ if (! immediate_operand (XEXP (op, 0), Pmode))
+ return 0;
+ return 1;
+}
+
+/* Return truth value of whether OP is EQ or NE. */
+
+int
+eq_or_neq (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
+}
+
+/* Return 1 if this is a comparison operator, but not an EQ, NE, GEU,
+ or LTU for non-floating-point. We handle those specially. */
+
+int
+normal_comp_operator (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CCFPmode
+ || GET_MODE (XEXP (op, 0)) == CCFPEmode)
+ return 1;
+
+ return (code != NE && code != EQ && code != GEU && code != LTU);
+}
+
+/* Return 1 if this is a comparison operator. This allows the use of
+ MATCH_OPERATOR to recognize all the branch insns. */
+
+int
+noov_compare_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ if (GET_MODE (XEXP (op, 0)) == CC_NOOVmode)
+ /* These are the only branches which work with CC_NOOVmode. */
+ return (code == EQ || code == NE || code == GE || code == LT);
+ return 1;
+}
+
+/* Nonzero if OP is a comparison operator suitable for use in v9
+ conditional move or branch on register contents instructions. */
+
+int
+v9_regcmp_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ if (GET_RTX_CLASS (code) != '<')
+ return 0;
+
+ return v9_regcmp_p (code);
+}
+
+/* ??? Same as eq_or_neq. */
+int
+v8plus_regcmp_op (op, mode)
+ register rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ enum rtx_code code = GET_CODE (op);
+
+ return (code == EQ || code == NE);
+}
+
+/* Return 1 if this is a SIGN_EXTEND or ZERO_EXTEND operation. */
+
+int
+extend_op (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND;
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can set
+ the condition codes explicitly. We do not include PLUS and MINUS
+ because these require CC_NOOVmode, which we handle explicitly. */
+
+int
+cc_arithop (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ if (GET_CODE (op) == AND
+ || GET_CODE (op) == IOR
+ || GET_CODE (op) == XOR)
+ return 1;
+
+ return 0;
+}
+
+/* Return nonzero if OP is an operator of mode MODE which can bitwise
+ complement its second operand and set the condition codes explicitly. */
+
+int
+cc_arithopn (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ /* XOR is not here because combine canonicalizes (xor (not ...) ...)
+ and (xor ... (not ...)) to (not (xor ...)). */
+ return (GET_CODE (op) == AND
+ || GET_CODE (op) == IOR);
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 13 bit immediate field. This is an acceptable SImode operand for
+ most 3 address instructions. */
+
+int
+arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ int val;
+ if (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX)
+ return 1;
+ if (GET_CODE (op) != CONST_INT)
+ return 0;
+ val = INTVAL (op) & 0xffffffff;
+ return SPARC_SIMM13_P (val);
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 11 bit immediate field. This is an acceptable SImode operand for
+ the movcc instructions. */
+
+int
+arith11_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SPARC_SIMM11_P (INTVAL (op))));
+}
+
+/* Return true if OP is a register, or is a CONST_INT that can fit in a
+ signed 10 bit immediate field. This is an acceptable SImode operand for
+ the movrcc instructions. */
+
+int
+arith10_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SPARC_SIMM10_P (INTVAL (op))));
+}
+
+/* Return true if OP is a register, is a CONST_INT that fits in a 13 bit
+   immediate field, or is a CONST_DOUBLE both of whose parts fit in a 13 bit
+ immediate field.
+ v9: Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+ can fit in a 13 bit immediate field. This is an acceptable DImode operand
+ for most 3 address instructions. */
+
+int
+arith_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_INT && SMALL_INT (op))
+ || (! TARGET_ARCH64
+ && GET_CODE (op) == CONST_DOUBLE
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_HIGH (op) + 0x1000) < 0x2000)
+ || (TARGET_ARCH64
+ && GET_CODE (op) == CONST_DOUBLE
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x1000) < 0x2000
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0x1000)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x1000) == 0))));
+}
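+
+/* The biased comparisons above are branch-free signed range tests:
+   for a 13 bit immediate, (unsigned) (x + 0x1000) < 0x2000 accepts
+   exactly [-4096, 4095].  E.g. 4095 + 0x1000 == 0x1fff passes, while
+   4096 + 0x1000 == 0x2000 fails and -4097 + 0x1000 wraps to a huge
+   unsigned value.  */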
+
+/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+ can fit in an 11 bit immediate field. This is an acceptable DImode
+ operand for the movcc instructions. */
+/* ??? Replace with arith11_operand? */
+
+int
+arith11_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_DOUBLE
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (CONST_DOUBLE_LOW (op) + 0x400) < 0x800
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x400) == 0x400)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x400) == 0)))
+ || (GET_CODE (op) == CONST_INT
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x400) < 0x800));
+}
+
+/* Return true if OP is a register, or is a CONST_INT or CONST_DOUBLE that
+   can fit in a 10 bit immediate field.  This is an acceptable DImode
+ operand for the movrcc instructions. */
+/* ??? Replace with arith10_operand? */
+
+int
+arith10_double_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return (register_operand (op, mode)
+ || GET_CODE (op) == CONSTANT_P_RTX
+ || (GET_CODE (op) == CONST_DOUBLE
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned) (CONST_DOUBLE_LOW (op) + 0x200) < 0x400
+ && ((CONST_DOUBLE_HIGH (op) == -1
+ && (CONST_DOUBLE_LOW (op) & 0x200) == 0x200)
+ || (CONST_DOUBLE_HIGH (op) == 0
+ && (CONST_DOUBLE_LOW (op) & 0x200) == 0)))
+ || (GET_CODE (op) == CONST_INT
+ && (GET_MODE (op) == mode || GET_MODE (op) == VOIDmode)
+ && (unsigned HOST_WIDE_INT) (INTVAL (op) + 0x200) < 0x400));
+}
+
+/* Return truth value of whether OP is an integer which fits the
+ range constraining immediate operands in most three-address insns,
+ which have a 13 bit immediate field. */
+
+int
+small_int (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return ((GET_CODE (op) == CONST_INT && SMALL_INT (op))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+}
+
+/* Recognize operand values for the umul instruction. That instruction sign
+ extends immediate values just like all other sparc instructions, but
+ interprets the extended result as an unsigned number. */
+
+int
+uns_small_int (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+#if HOST_BITS_PER_WIDE_INT > 32
+ /* All allowed constants will fit a CONST_INT. */
+ return ((GET_CODE (op) == CONST_INT
+ && ((INTVAL (op) >= 0 && INTVAL (op) < 0x1000)
+ || (INTVAL (op) >= 0xFFFFF000 && INTVAL (op) < 0x100000000L)))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+#else
+ return (((GET_CODE (op) == CONST_INT && (unsigned) INTVAL (op) < 0x1000)
+ || (GET_CODE (op) == CONST_DOUBLE
+ && CONST_DOUBLE_HIGH (op) == 0
+ && (unsigned) CONST_DOUBLE_LOW (op) - 0xFFFFF000 < 0x1000))
+ || GET_CODE (op) == CONSTANT_P_RTX);
+#endif
+}
+
+int
+uns_arith_operand (op, mode)
+ rtx op;
+ enum machine_mode mode;
+{
+ return register_operand (op, mode) || uns_small_int (op, mode);
+}
+
+/* Return truth value of statement that OP is a call-clobbered register. */
+int
+clobbered_register (op, mode)
+ rtx op;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+{
+ return (GET_CODE (op) == REG && call_used_regs[REGNO (op)]);
+}
+
+/* X and Y are two things to compare using CODE. Emit the compare insn and
+ return the rtx for the cc reg in the proper mode. */
+
+rtx
+gen_compare_reg (code, x, y)
+ enum rtx_code code;
+ rtx x, y;
+{
+ enum machine_mode mode = SELECT_CC_MODE (code, x, y);
+ rtx cc_reg;
+
+ /* ??? We don't have movcc patterns so we cannot generate pseudo regs for the
+ fcc regs (cse can't tell they're really call clobbered regs and will
+ remove a duplicate comparison even if there is an intervening function
+ call - it will then try to reload the cc reg via an int reg which is why
+ we need the movcc patterns). It is possible to provide the movcc
+ patterns by using the ldxfsr/stxfsr v9 insns. I tried it: you need two
+ registers (say %g1,%g5) and it takes about 6 insns. A better fix would be
+ to tell cse that CCFPE mode registers (even pseudos) are call
+ clobbered. */
+
+ /* ??? This is an experiment. Rather than making changes to cse which may
+ or may not be easy/clean, we do our own cse. This is possible because
+ we will generate hard registers. Cse knows they're call clobbered (it
+ doesn't know the same thing about pseudos). If we guess wrong, no big
+ deal, but if we win, great! */
+
+ if (TARGET_V9 && GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+#if 1 /* experiment */
+ {
+ int reg;
+ /* We cycle through the registers to ensure they're all exercised. */
+ static int next_fcc_reg = 0;
+ /* Previous x,y for each fcc reg. */
+ static rtx prev_args[4][2];
+
+ /* Scan prev_args for x,y. */
+ for (reg = 0; reg < 4; reg++)
+ if (prev_args[reg][0] == x && prev_args[reg][1] == y)
+ break;
+ if (reg == 4)
+ {
+ reg = next_fcc_reg;
+ prev_args[reg][0] = x;
+ prev_args[reg][1] = y;
+ next_fcc_reg = (next_fcc_reg + 1) & 3;
+ }
+ cc_reg = gen_rtx_REG (mode, reg + SPARC_FIRST_V9_FCC_REG);
+ }
+#else
+ cc_reg = gen_reg_rtx (mode);
+#endif /* ! experiment */
+ else if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ cc_reg = gen_rtx_REG (mode, SPARC_FCC_REG);
+ else
+ cc_reg = gen_rtx_REG (mode, SPARC_ICC_REG);
+
+ if (TARGET_V8PLUS && mode == CCXmode)
+ {
+ emit_insn (gen_cmpdi_v8plus (x, y));
+ }
+ else
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, cc_reg,
+ gen_rtx_COMPARE (mode, x, y)));
+ }
+
+ return cc_reg;
+}
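+
+/* For instance, gen_compare_reg (GT, x, y) on integer operands emits
+   (set (reg:CC 100 %icc) (compare:CC x y)) and returns the %icc
+   register, which the caller then wraps in a branch or scc pattern.  */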
+
+/* This function is used for v9 only.
+ CODE is the code for an Scc's comparison.
+ OPERANDS[0] is the target of the Scc insn.
+ OPERANDS[1] is the value we compare against const0_rtx (which hasn't
+ been generated yet).
+
+ This function is needed to turn
+
+ (set (reg:SI 110)
+ (gt (reg:CCX 100 %icc)
+ (const_int 0)))
+ into
+ (set (reg:SI 110)
+ (gt:DI (reg:CCX 100 %icc)
+ (const_int 0)))
+
+   I.e., the instruction recognizer needs to see the mode of the comparison to
+ find the right instruction. We could use "gt:DI" right in the
+ define_expand, but leaving it out allows us to handle DI, SI, etc.
+
+ We refer to the global sparc compare operands sparc_compare_op0 and
+ sparc_compare_op1. */
+
+int
+gen_v9_scc (compare_code, operands)
+ enum rtx_code compare_code;
+ register rtx *operands;
+{
+ rtx temp, op0, op1;
+
+ if (! TARGET_ARCH64
+ && (GET_MODE (sparc_compare_op0) == DImode
+ || GET_MODE (operands[0]) == DImode))
+ return 0;
+
+ /* Handle the case where operands[0] == sparc_compare_op0.
+ We "early clobber" the result. */
+ if (REGNO (operands[0]) == REGNO (sparc_compare_op0))
+ {
+ op0 = gen_reg_rtx (GET_MODE (sparc_compare_op0));
+ emit_move_insn (op0, sparc_compare_op0);
+ }
+ else
+ op0 = sparc_compare_op0;
+ /* For consistency in the following. */
+ op1 = sparc_compare_op1;
+
+ /* Try to use the movrCC insns. */
+ if (TARGET_ARCH64
+ && GET_MODE_CLASS (GET_MODE (op0)) == MODE_INT
+ && op1 == const0_rtx
+ && v9_regcmp_p (compare_code))
+ {
+ /* Special case for op0 != 0. This can be done with one instruction if
+ operands[0] == sparc_compare_op0. We don't assume they are equal
+ now though. */
+
+ if (compare_code == NE
+ && GET_MODE (operands[0]) == DImode
+ && GET_MODE (op0) == DImode)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], op0));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (DImode,
+ gen_rtx_fmt_ee (compare_code, DImode,
+ op0, const0_rtx),
+ const1_rtx,
+ operands[0])));
+ return 1;
+ }
+
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
+ if (GET_MODE (op0) != DImode)
+ {
+ temp = gen_reg_rtx (DImode);
+ convert_move (temp, op0, 0);
+ }
+ else
+ temp = op0;
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_fmt_ee (compare_code, DImode,
+ temp, const0_rtx),
+ const1_rtx,
+ operands[0])));
+ return 1;
+ }
+ else
+ {
+ operands[1] = gen_compare_reg (compare_code, op0, op1);
+
+ switch (GET_MODE (operands[1]))
+ {
+ case CCmode :
+ case CCXmode :
+ case CCFPEmode :
+ case CCFPmode :
+ break;
+ default :
+ abort ();
+ }
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0], const0_rtx));
+ emit_insn (gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
+ gen_rtx_fmt_ee (compare_code,
+ GET_MODE (operands[1]),
+ operands[1], const0_rtx),
+ const1_rtx, operands[0])));
+ return 1;
+ }
+}
+
+/* Emit a conditional jump insn for the v9 architecture using comparison code
+ CODE and jump target LABEL.
+ This function exists to take advantage of the v9 brxx insns. */
+
+void
+emit_v9_brxx_insn (code, op0, label)
+ enum rtx_code code;
+ rtx op0, label;
+{
+ emit_jump_insn (gen_rtx_SET (VOIDmode,
+ pc_rtx,
+ gen_rtx_IF_THEN_ELSE (VOIDmode,
+ gen_rtx_fmt_ee (code, GET_MODE (op0),
+ op0, const0_rtx),
+ gen_rtx_LABEL_REF (VOIDmode, label),
+ pc_rtx)));
+}
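+
+/* The insn built above is matched by the v9 branch-on-register
+   patterns in sparc.md; e.g. (eq op0 0) comes out as roughly
+   "brz reg,label" plus a delay slot (the exact template lives in
+   sparc.md).  */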
+
+/* Return nonzero if it is OK for a return peephole to merge the
+   return with the setting of an output register.  */
+int
+leaf_return_peephole_ok ()
+{
+ return (actual_fsize == 0);
+}
+
+/* Return nonzero if TRIAL can go into the function epilogue's
+ delay slot. SLOT is the slot we are trying to fill. */
+
+int
+eligible_for_epilogue_delay (trial, slot)
+ rtx trial;
+ int slot;
+{
+ rtx pat, src;
+
+ if (slot >= 1)
+ return 0;
+
+ if (GET_CODE (trial) != INSN || GET_CODE (PATTERN (trial)) != SET)
+ return 0;
+
+ if (get_attr_length (trial) != 1)
+ return 0;
+
+ /* If %g0 is live, there are lots of things we can't handle.
+ Rather than trying to find them all now, let's punt and only
+ optimize things as necessary. */
+ if (TARGET_LIVE_G0)
+ return 0;
+
+ /* In the case of a true leaf function, anything can go into the delay slot.
+ A delay slot only exists however if the frame size is zero, otherwise
+ we will put an insn to adjust the stack after the return. */
+ if (leaf_function)
+ {
+ if (leaf_return_peephole_ok ())
+ return ((get_attr_in_uncond_branch_delay (trial)
+ == IN_BRANCH_DELAY_TRUE));
+ return 0;
+ }
+
+ /* If only trivial `restore' insns work, nothing can go in the
+ delay slot. */
+ else if (TARGET_BROKEN_SAVERESTORE)
+ return 0;
+
+ pat = PATTERN (trial);
+
+ /* Otherwise, only operations which can be done in tandem with
+ a `restore' insn can go into the delay slot. */
+ if (GET_CODE (SET_DEST (pat)) != REG
+ || REGNO (SET_DEST (pat)) >= 32
+ || REGNO (SET_DEST (pat)) < 24)
+ return 0;
+
+ /* The set of insns matched here must agree precisely with the set of
+ patterns paired with a RETURN in sparc.md. */
+
+ src = SET_SRC (pat);
+
+ /* This matches "*return_[qhs]i". */
+ if (arith_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (SImode);
+
+ /* This matches "*return_di". */
+ else if (arith_double_operand (src, GET_MODE (src)))
+ return GET_MODE_SIZE (GET_MODE (src)) <= GET_MODE_SIZE (DImode);
+
+ /* This matches "*return_sf_no_fpu". */
+ else if (! TARGET_FPU && restore_operand (SET_DEST (pat), SFmode)
+ && register_operand (src, SFmode))
+ return 1;
+
+ /* This matches "*return_addsi". */
+ else if (GET_CODE (src) == PLUS
+ && arith_operand (XEXP (src, 0), SImode)
+ && arith_operand (XEXP (src, 1), SImode)
+ && (register_operand (XEXP (src, 0), SImode)
+ || register_operand (XEXP (src, 1), SImode)))
+ return 1;
+
+ /* This matches "*return_adddi". */
+ else if (GET_CODE (src) == PLUS
+ && arith_double_operand (XEXP (src, 0), DImode)
+ && arith_double_operand (XEXP (src, 1), DImode)
+ && (register_operand (XEXP (src, 0), DImode)
+ || register_operand (XEXP (src, 1), DImode)))
+ return 1;
+
+ return 0;
+}
+
+static int
+check_return_regs (x)
+ rtx x;
+{
+ switch (GET_CODE (x))
+ {
+ case REG:
+ return IN_OR_GLOBAL_P (x);
+
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return 1;
+
+ case SET:
+ case IOR:
+ case AND:
+ case XOR:
+ case PLUS:
+ case MINUS:
+ if (check_return_regs (XEXP (x, 1)) == 0)
+ return 0;
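+      /* Fall through.  */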
+ case NOT:
+ case NEG:
+ case MEM:
+ return check_return_regs (XEXP (x, 0));
+
+ default:
+ return 0;
+ }
+
+}
+
+/* Return 1 if TRIAL references only in and global registers. */
+int
+eligible_for_return_delay (trial)
+ rtx trial;
+{
+ if (GET_CODE (PATTERN (trial)) != SET)
+ return 0;
+
+ return check_return_regs (PATTERN (trial));
+}
+
+int
+short_branch (uid1, uid2)
+ int uid1, uid2;
+{
+ unsigned int delta = insn_addresses[uid1] - insn_addresses[uid2];
+ if (delta + 1024 < 2048)
+ return 1;
+ /* warning ("long branch, distance %d", delta); */
+ return 0;
+}
+
+/* Return non-zero if REG is not used after INSN.
+ We assume REG is a reload reg, and therefore does
+ not live past labels or calls or jumps. */
+int
+reg_unused_after (reg, insn)
+ rtx reg;
+ rtx insn;
+{
+ enum rtx_code code, prev_code = UNKNOWN;
+
+ while ((insn = NEXT_INSN (insn)))
+ {
+ if (prev_code == CALL_INSN && call_used_regs[REGNO (reg)])
+ return 1;
+
+ code = GET_CODE (insn);
+ if (GET_CODE (insn) == CODE_LABEL)
+ return 1;
+
+ if (GET_RTX_CLASS (code) == 'i')
+ {
+ rtx set = single_set (insn);
+ int in_src = set && reg_overlap_mentioned_p (reg, SET_SRC (set));
+ if (set && in_src)
+ return 0;
+ if (set && reg_overlap_mentioned_p (reg, SET_DEST (set)))
+ return 1;
+ if (set == 0 && reg_overlap_mentioned_p (reg, PATTERN (insn)))
+ return 0;
+ }
+ prev_code = code;
+ }
+ return 1;
+}
+
+/* The table we use to reference PIC data. */
+static rtx global_offset_table;
+
+/* The function we use to get at it. */
+static rtx get_pc_symbol;
+static char get_pc_symbol_name[256];
+
+/* Ensure that we are not using patterns that are not OK with PIC. */
+
+int
+check_pic (i)
+ int i;
+{
+ switch (flag_pic)
+ {
+ case 1:
+ if (GET_CODE (recog_operand[i]) == SYMBOL_REF
+ || (GET_CODE (recog_operand[i]) == CONST
+ && ! (GET_CODE (XEXP (recog_operand[i], 0)) == MINUS
+ && (XEXP (XEXP (recog_operand[i], 0), 0)
+ == global_offset_table)
+ && (GET_CODE (XEXP (XEXP (recog_operand[i], 0), 1))
+ == CONST))))
+ abort ();
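+      /* Fall through.  */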
+ case 2:
+ default:
+ return 1;
+ }
+}
+
+/* Return true if X is an address which needs a temporary register when
+ reloaded while generating PIC code. */
+
+int
+pic_address_needs_scratch (x)
+ rtx x;
+{
+  /* An address which is a symbolic operand plus a non-SMALL_INT
+     constant offset needs a temp reg.  */
+ if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && ! SMALL_INT (XEXP (XEXP (x, 0), 1)))
+ return 1;
+
+ return 0;
+}
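+
+/* For example, (const (plus (symbol_ref "x") (const_int 0x2000)))
+   needs a scratch when generating PIC: 0x2000 does not fit in the
+   signed 13 bit immediate field, so the offset must be built in a
+   separate register before being added to the GOT entry.  */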
+
+/* Legitimize PIC addresses. If the address is already position-independent,
+ we return ORIG. Newly generated position-independent addresses go into a
+ reg. This is REG if non zero, otherwise we allocate register(s) as
+ necessary. */
+
+rtx
+legitimize_pic_address (orig, mode, reg)
+ rtx orig;
+ enum machine_mode mode ATTRIBUTE_UNUSED;
+ rtx reg;
+{
+ if (GET_CODE (orig) == SYMBOL_REF
+ || GET_CODE (orig) == LABEL_REF)
+ {
+ rtx pic_ref, address;
+ rtx insn;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (flag_pic == 2)
+ {
+ /* If not during reload, allocate another temp reg here for loading
+ in the address, so that these instructions can be optimized
+ properly. */
+ rtx temp_reg = ((reload_in_progress || reload_completed)
+ ? reg : gen_reg_rtx (Pmode));
+
+ /* Must put the SYMBOL_REF inside an UNSPEC here so that cse
+ won't get confused into thinking that these two instructions
+ are loading in the true address of the symbol. If in the
+ future a PIC rtx exists, that should be used instead. */
+ emit_insn (gen_pic_sethi_si (temp_reg, orig));
+ emit_insn (gen_pic_lo_sum_si (temp_reg, temp_reg, orig));
+
+ address = temp_reg;
+ }
+ else
+ address = orig;
+
+ pic_ref = gen_rtx_MEM (Pmode,
+ gen_rtx_PLUS (Pmode,
+ pic_offset_table_rtx, address));
+ current_function_uses_pic_offset_table = 1;
+ RTX_UNCHANGING_P (pic_ref) = 1;
+ insn = emit_move_insn (reg, pic_ref);
+ /* Put a REG_EQUAL note on this insn, so that it can be optimized
+ by loop. */
+ REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig,
+ REG_NOTES (insn));
+ return reg;
+ }
+ else if (GET_CODE (orig) == CONST)
+ {
+ rtx base, offset;
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS
+ && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
+ return orig;
+
+ if (reg == 0)
+ {
+ if (reload_in_progress || reload_completed)
+ abort ();
+ else
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ if (GET_CODE (XEXP (orig, 0)) == PLUS)
+ {
+ base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
+ offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
+ base == reg ? 0 : reg);
+ }
+ else
+ abort ();
+
+ if (GET_CODE (offset) == CONST_INT)
+ {
+ if (SMALL_INT (offset))
+ return plus_constant_for_output (base, INTVAL (offset));
+ else if (! reload_in_progress && ! reload_completed)
+ offset = force_reg (Pmode, offset);
+ else
+ /* If we reach here, then something is seriously wrong. */
+ abort ();
+ }
+ return gen_rtx_PLUS (Pmode, base, offset);
+ }
+
+ return orig;
+}
+
+/* Set up PIC-specific rtl. This should not cause any insns
+ to be emitted. */
+
+void
+initialize_pic ()
+{
+}
+
+/* Return the RTX for insns to set the PIC register. */
+
+static rtx
+pic_setup_code ()
+{
+ rtx seq;
+
+ start_sequence ();
+ emit_insn (gen_get_pc (pic_offset_table_rtx, global_offset_table,
+ get_pc_symbol));
+ seq = gen_sequence ();
+ end_sequence ();
+
+ return seq;
+}
+
+/* Emit special PIC prologues and epilogues. */
+
+void
+finalize_pic ()
+{
+ /* Labels to get the PC in the prologue of this function. */
+ int orig_flag_pic = flag_pic;
+ rtx insn;
+
+ if (current_function_uses_pic_offset_table == 0)
+ return;
+
+ if (! flag_pic)
+ abort ();
+
+  /* If we haven't emitted the special get_pc helper function, do so now.  */
+ if (get_pc_symbol_name[0] == 0)
+ {
+ ASM_GENERATE_INTERNAL_LABEL (get_pc_symbol_name, "LGETPC", 0);
+
+ text_section ();
+ ASM_OUTPUT_ALIGN (asm_out_file, 3);
+ ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, "LGETPC", 0);
+ fputs ("\tretl\n\tadd %o7,%l7,%l7\n", asm_out_file);
+ }
+
+ /* Initialize every time through, since we can't easily
+ know this to be permanent. */
+ global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
+ get_pc_symbol = gen_rtx_SYMBOL_REF (Pmode, get_pc_symbol_name);
+ flag_pic = 0;
+
+ emit_insn_after (pic_setup_code (), get_insns ());
+
+ /* Insert the code in each nonlocal goto receiver. */
+ for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
+ if (GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
+ && XINT (PATTERN (insn), 1) == 4)
+ emit_insn_after (pic_setup_code (), insn);
+
+ flag_pic = orig_flag_pic;
+
+ /* Need to emit this whether or not we obey regdecls,
+ since setjmp/longjmp can cause life info to screw up.
+ ??? In the case where we don't obey regdecls, this is not sufficient
+ since we may not fall out the bottom. */
+ emit_insn (gen_rtx_USE (VOIDmode, pic_offset_table_rtx));
+}
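+
+/* The prologue sequence emitted through pic_setup_code is roughly
+   (a sketch; the exact output comes from the get_pc pattern in
+   sparc.md):
+
+	sethi	%hi(_GLOBAL_OFFSET_TABLE_-4),%l7
+	call	.LLGETPC0
+	 add	%l7,%lo(_GLOBAL_OFFSET_TABLE_+4),%l7
+
+   with the helper label returning via "retl; add %o7,%l7,%l7" as
+   output above, leaving the GOT address in %l7.  */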
+
+/* Emit insns to move operands[1] into operands[0].
+
+ Return 1 if we have written out everything that needs to be done to
+ do the move. Otherwise, return 0 and the caller will emit the move
+ normally. */
+
+int
+emit_move_sequence (operands, mode)
+ rtx *operands;
+ enum machine_mode mode;
+{
+ register rtx operand0 = operands[0];
+ register rtx operand1 = operands[1];
+
+ if (CONSTANT_P (operand1) && flag_pic
+ && pic_address_needs_scratch (operand1))
+ operands[1] = operand1 = legitimize_pic_address (operand1, mode, 0);
+
+ /* Handle most common case first: storing into a register. */
+ if (register_operand (operand0, mode))
+ {
+ /* Integer constant to FP register. */
+ if (GET_CODE (operand0) == REG
+ && REGNO (operand0) >= 32
+ && REGNO (operand0) < FIRST_PSEUDO_REGISTER
+ && CONSTANT_P (operand1))
+ {
+ operand1 = validize_mem (force_const_mem (GET_MODE (operand0), operand1));
+ }
+
+ if (register_operand (operand1, mode)
+ || (GET_CODE (operand1) == CONST_INT && SMALL_INT (operand1))
+ || (GET_CODE (operand1) == CONST_DOUBLE
+ && arith_double_operand (operand1, DImode))
+ || (GET_CODE (operand1) == HIGH && GET_MODE (operand1) != DImode)
+ /* Only `general_operands' can come here, so MEM is ok. */
+ || GET_CODE (operand1) == MEM)
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ }
+ else if (GET_CODE (operand0) == MEM)
+ {
+ if (register_operand (operand1, mode)
+ || (operand1 == const0_rtx && ! TARGET_LIVE_G0))
+ {
+ /* Run this case quickly. */
+ emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
+ return 1;
+ }
+ if (! reload_in_progress)
+ {
+ operands[0] = validize_mem (operand0);
+ operands[1] = operand1 = force_reg (mode, operand1);
+ }
+ }
+
+ /* DImode HIGH values in sparc64 need a clobber added. */
+ if (TARGET_ARCH64
+ && GET_CODE (operand1) == HIGH && GET_MODE (operand1) == DImode)
+ {
+ emit_insn (gen_sethi_di_sp64 (operand0, XEXP (operand1, 0)));
+ return 1;
+ }
+ /* Simplify the source if we need to. */
+ else if (GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
+ {
+ if (flag_pic && symbolic_operand (operand1, mode))
+ {
+ rtx temp_reg = reload_in_progress ? operand0 : 0;
+
+ operands[1] = legitimize_pic_address (operand1, mode, temp_reg);
+ }
+ else if (GET_CODE (operand1) == CONST_INT
+ ? (! SMALL_INT (operand1)
+ && INTVAL (operand1) != -4096
+ && ! SPARC_SETHI_P (INTVAL (operand1)))
+ : GET_CODE (operand1) == CONST_DOUBLE
+ ? ! arith_double_operand (operand1, DImode)
+ : 1)
+ {
+ /* For DImode values, temp must be operand0 because of the way
+ HI and LO_SUM work. The LO_SUM operator only copies half of
+ the LSW from the dest of the HI operator. If the LO_SUM dest is
+ not the same as the HI dest, then the MSW of the LO_SUM dest will
+ never be set.
+
+ ??? The real problem here is that the ...(HI:DImode pattern emits
+ multiple instructions, and the ...(LO_SUM:DImode pattern emits
+ one instruction. This fails, because the compiler assumes that
+ LO_SUM copies all bits of the first operand to its dest. Better
+ would be to have the HI pattern emit one instruction and the
+ LO_SUM pattern multiple instructions. Even better would be
+ to use four rtl insns. */
+ rtx temp = ((reload_in_progress || mode == DImode)
+ ? operand0 : gen_reg_rtx (mode));
+
+ if (mode == SImode)
+ {
+ if (GET_CODE (operand1) == CONST_INT)
+ operand1 = GEN_INT (INTVAL (operand1) & 0xffffffff);
+ else if (GET_CODE (operand1) == CONST_DOUBLE)
+ operand1 = GEN_INT (CONST_DOUBLE_LOW (operand1) & 0xffffffff);
+ }
+
+ if (TARGET_ARCH64 && mode == DImode)
+ emit_insn (gen_sethi_di_sp64 (temp, operand1));
+ else
+ emit_insn (gen_rtx_SET (VOIDmode, temp,
+ gen_rtx_HIGH (mode, operand1)));
+
+ operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
+ }
+ }
+
+ /* Now have insn-emit do whatever it normally does. */
+ return 0;
+}
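+
+/* E.g. moving the SImode constant 0x12345678 into a register goes
+   through the code above as (set (reg) (high:SI 0x12345678)) plus a
+   (lo_sum:SI ...) source, which the md patterns render as the
+   sethi/or pair shown in singlemove_string below.  */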
+
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as a 4 byte quantity.
+
+ This isn't intended to be very smart. It is up to the caller to
+ choose the best way to do things.
+
+ Note that OPERANDS may be modified to suit the returned string. */
+
+char *
+singlemove_string (operands)
+ rtx *operands;
+{
+ if (GET_CODE (operands[0]) == MEM)
+ {
+ if (GET_CODE (operands[1]) != MEM)
+ return "st %r1,%0";
+ else
+ abort ();
+ }
+ else if (GET_CODE (operands[1]) == MEM)
+ return "ld %1,%0";
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ REAL_VALUE_TYPE r;
+ long i;
+
+ /* Must be SFmode, otherwise this doesn't make sense. */
+ if (GET_MODE (operands[1]) != SFmode)
+ abort ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (r, i);
+ operands[1] = GEN_INT (i);
+
+ if (CONST_OK_FOR_LETTER_P (i, 'I'))
+ return "mov %1,%0";
+ else if ((i & 0x000003FF) != 0)
+ return "sethi %%hi(%a1),%0\n\tor %0,%%lo(%a1),%0";
+ else
+ return "sethi %%hi(%a1),%0";
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ /* Only consider the low 32 bits of the constant. */
+ int i = INTVAL (operands[1]) & 0xffffffff;
+
+ if (SPARC_SIMM13_P (i))
+ return "mov %1,%0";
+
+ if (i == 4096)
+ return "sub %%g0,-4096,%0";
+
+ /* If all low order 10 bits are clear, then we only need a single
+ sethi insn to load the constant. */
+ /* FIXME: Use SETHI_P. */
+ if ((i & 0x000003FF) != 0)
+ return "sethi %%hi(%a1),%0\n\tor %0,%%lo(%a1),%0";
+ else
+ return "sethi %%hi(%a1),%0";
+ }
+  /* Operand 1 must be a register, or an 'I' type CONST_INT.  */
+ return "mov %1,%0";
+}
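+
+/* Worked example for the constant cases above: 0x12345678 is not a
+   SIMM13 and has low order bits set, so it becomes
+
+	sethi	%hi(0x12345678),%reg	! upper 22 bits
+	or	%reg,%lo(0x12345678),%reg	! low 10 bits
+
+   while 0x12340000, whose low 10 bits are clear, needs only the
+   sethi.  */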
+
+/* Return the best assembler insn template
+ for moving operands[1] into operands[0] as an 8 byte quantity.
+
+ This isn't intended to be very smart. It is up to the caller to
+ choose the best way to do things.
+
+ Note that OPERANDS may be modified to suit the returned string. */
+
+char *
+doublemove_string (operands)
+ rtx *operands;
+{
+ rtx op0 = operands[0], op1 = operands[1];
+
+ if (GET_CODE (op0) == MEM)
+ {
+ if (GET_CODE (op1) == REG)
+ {
+ if (FP_REG_P (op1))
+ return "std %1,%0";
+ return TARGET_ARCH64 ? "stx %1,%0" : "std %1,%0";
+ }
+ if (TARGET_ARCH64
+ && (op1 == const0_rtx
+ || (GET_MODE (op1) != VOIDmode
+ && op1 == CONST0_RTX (GET_MODE (op1)))))
+ return "stx %r1,%0";
+ abort ();
+ }
+ else if (GET_CODE (op1) == MEM)
+ {
+ if (GET_CODE (op0) != REG)
+ abort ();
+ if (FP_REG_P (op0))
+ return "ldd %1,%0";
+ return TARGET_ARCH64 ? "ldx %1,%0" : "ldd %1,%0";
+ }
+ else if (GET_CODE (operands[1]) == CONST_DOUBLE)
+ {
+ /* ??? Unfinished, and maybe not needed. */
+ abort ();
+ }
+ else if (GET_CODE (operands[1]) == CONST_INT
+ && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ {
+ /* ??? Unfinished, and maybe not needed. */
+ abort ();
+ }
+  /* Operand 1 must be a register, or an 'I' type CONST_INT.  */
+ return "mov %1,%0";
+}
+
+/* Return non-zero if it is OK to assume that the given memory operand is
+ aligned at least to a 8-byte boundary. This should only be called
+ for memory accesses whose size is 8 bytes or larger. */
+
+int
+mem_aligned_8 (mem)
+ register rtx mem;
+{
+ register rtx addr;
+ register rtx base;
+ register rtx offset;
+
+ if (GET_CODE (mem) != MEM)
+ return 0; /* It's gotta be a MEM! */
+
+ addr = XEXP (mem, 0);
+
+ /* Now that all misaligned double parms are copied on function entry,
+ we can assume any 64-bit object is 64-bit aligned except those which
+ are at unaligned offsets from the stack or frame pointer. If the
+ TARGET_UNALIGNED_DOUBLES switch is given, we do not make this
+ assumption. */
+
+ /* See what register we use in the address. */
+ base = offset = 0;
+ if (GET_CODE (addr) == PLUS)
+ {
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ base = XEXP (addr, 0);
+ offset = XEXP (addr, 1);
+ }
+ }
+ else if (GET_CODE (addr) == REG)
+ {
+ base = addr;
+ offset = const0_rtx;
+ }
+
+ /* If it's the stack or frame pointer, check offset alignment.
+ We can have improper alignment in the function entry code. */
+ if (base
+ && (REGNO (base) == FRAME_POINTER_REGNUM
+ || REGNO (base) == STACK_POINTER_REGNUM))
+ {
+ if (((INTVAL (offset) - SPARC_STACK_BIAS) & 0x7) == 0)
+ return 1;
+ }
+ /* Anything else we know is properly aligned unless TARGET_UNALIGNED_DOUBLES
+ is true, in which case we can only assume that an access is aligned if
+ it is to a constant address, or the address involves a LO_SUM.
+
+ We used to assume an address was aligned if MEM_IN_STRUCT_P was true.
+ That assumption was deleted so that gcc generated code can be used with
+ memory allocators that only guarantee 4 byte alignment. */
+ else if (! TARGET_UNALIGNED_DOUBLES || CONSTANT_P (addr)
+ || GET_CODE (addr) == LO_SUM)
+ return 1;
+
+ /* An obviously unaligned address. */
+ return 0;
+}
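+
+/* For example, with a zero SPARC_STACK_BIAS,
+   (mem:DF (plus (reg %fp) (const_int -8))) is accepted as 8-byte
+   aligned (the offset is 0 mod 8), while an offset of -12 is
+   rejected, steering output_move_double away from ldd/std.  */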
+
+enum optype { REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP };
+
+/* Output assembler code to perform a doubleword move insn
+ with operands OPERANDS. This is very similar to the following
+ output_move_quad function. */
+
+char *
+output_move_double (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+ register enum optype optype0;
+ register enum optype optype1;
+ rtx latehalf[2];
+ rtx addreg0 = 0;
+ rtx addreg1 = 0;
+ int highest_first = 0;
+ int no_addreg1_decrement = 0;
+
+ /* First classify both operands. */
+
+ if (REG_P (op0))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (op0))
+ optype0 = OFFSOP;
+ else if (GET_CODE (op0) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (op1))
+ optype1 = REGOP;
+ else if (CONSTANT_P (op1))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (op1))
+ optype1 = OFFSOP;
+ else if (GET_CODE (op1) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow to happen. Abort if we get one,
+ because generating code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP
+      || (optype0 == MEMOP && optype1 == MEMOP))
+ abort ();
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the second word. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (op0, 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (op1, 0));
+
+ /* Ok, we can do one word at a time.
+ Set up in LATEHALF the operands to use for the
+ high-numbered (least significant) word and in some cases alter the
+ operands in OPERANDS to be suitable for the low-numbered word. */
+
+ if (optype0 == REGOP)
+ latehalf[0] = gen_rtx_REG (SImode, REGNO (op0) + 1);
+ else if (optype0 == OFFSOP)
+ latehalf[0] = adj_offsettable_operand (op0, 4);
+ else
+ latehalf[0] = op0;
+
+ if (optype1 == REGOP)
+ latehalf[1] = gen_rtx_REG (SImode, REGNO (op1) + 1);
+ else if (optype1 == OFFSOP)
+ latehalf[1] = adj_offsettable_operand (op1, 4);
+ else if (optype1 == CNSTOP)
+ {
+ if (TARGET_ARCH64)
+ {
+ if (arith_double_operand (op1, DImode))
+ {
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
+ return "mov %1,%0";
+ }
+ else
+ {
+ /* The only way to handle CONST_DOUBLEs or other 64 bit
+ constants here is to use a temporary, such as is done
+ for the V9 DImode sethi insn pattern. This is not
+ a practical solution, so abort if we reach here.
+ The md file should always force such constants to
+ memory. */
+ abort ();
+ }
+ }
+ else
+ split_double (op1, &operands[1], &latehalf[1]);
+ }
+ else
+ latehalf[1] = op1;
+
+ /* Easy case: try moving both words at once. Check for moving between
+ an even/odd register pair and a memory location. */
+ if ((optype0 == REGOP && optype1 != REGOP && optype1 != CNSTOP
+ && (TARGET_ARCH64 || (REGNO (op0) & 1) == 0))
+ || (optype0 != REGOP && optype0 != CNSTOP && optype1 == REGOP
+ && (TARGET_ARCH64 || (REGNO (op1) & 1) == 0)))
+ {
+ register rtx mem,reg;
+
+ if (optype0 == REGOP)
+ mem = op1, reg = op0;
+ else
+ mem = op0, reg = op1;
+
+ /* In v9, ldd can be used for word aligned addresses, so technically
+ some of this logic is unneeded. We still avoid ldd if the address
+ is obviously unaligned though.
+
+ Integer ldd/std are deprecated in V9 and are slow on UltraSPARC.
+ Use them only if the access is volatile or not offsettable. */
+
+ if ((mem_aligned_8 (mem)
+ && (REGNO (reg) >= 32
+ || MEM_VOLATILE_P (mem)
+ || ! ((optype0 == OFFSOP || optype1 == OFFSOP)
+ && (sparc_cpu == PROCESSOR_ULTRASPARC
+ || sparc_cpu == PROCESSOR_V9))))
+ /* If this is a floating point register higher than %f31,
+ then we *must* use an aligned load, since `ld' will not accept
+ the register number. */
+ || (TARGET_V9 && REGNO (reg) >= 64)
+ /* Even if two instructions would otherwise be better than ldd/std,
+ if this insn was put in a delay slot because reorg thought it
+ was only one machine instruction, make sure it is only one
+ instruction. */
+ || dbr_sequence_length () != 0)
+ {
+ if (FP_REG_P (reg) || ! TARGET_ARCH64)
+ return (mem == op1 ? "ldd %1,%0" : "std %1,%0");
+ else
+ return (mem == op1 ? "ldx %1,%0" : "stx %1,%0");
+ }
+ }
+
+ if (TARGET_ARCH64)
+ {
+ if (optype0 == REGOP && optype1 == REGOP)
+ {
+ if (FP_REG_P (op0))
+ return "fmovd %1,%0";
+ else
+ return "mov %1,%0";
+ }
+ }
+
+ /* If the first move would clobber the source of the second one,
+ do them in the other order. */
+
+ /* Overlapping registers. */
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (op0) == REGNO (latehalf[1]))
+ {
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+ /* Do low-numbered word. */
+ return singlemove_string (operands);
+ }
+ /* Loading into a register which overlaps a register used in the address. */
+ else if (optype0 == REGOP && optype1 != REGOP
+ && reg_overlap_mentioned_p (op0, op1))
+ {
+ /* If both halves of dest are used in the src memory address,
+ add the two regs and put them in the low reg (op0).
+ Then it works to load latehalf first. */
+ if (reg_mentioned_p (op0, XEXP (op1, 0))
+ && reg_mentioned_p (latehalf[0], XEXP (op1, 0)))
+ {
+ rtx xops[2];
+ xops[0] = latehalf[0];
+ xops[1] = op0;
+ output_asm_insn ("add %1,%0,%1", xops);
+ operands[1] = gen_rtx_MEM (DImode, op0);
+ latehalf[1] = adj_offsettable_operand (operands[1], 4);
+ addreg1 = 0;
+ highest_first = 1;
+ }
+ /* Only one register in the dest is used in the src memory address,
+ and this is the first register of the dest, so we want to do
+ the late half first here also. */
+ else if (! reg_mentioned_p (latehalf[0], XEXP (op1, 0)))
+ highest_first = 1;
+ /* Only one register in the dest is used in the src memory address,
+ and this is the second register of the dest, so we want to do
+ the late half last. If addreg1 is set, and addreg1 is the same
+ register as latehalf, then we must suppress the trailing decrement,
+ because it would clobber the value just loaded. */
+ else if (addreg1 && reg_mentioned_p (addreg1, latehalf[0]))
+ no_addreg1_decrement = 1;
+ }
+
+ /* Normal case: do the two words, low-numbered first.
+ Overlap case (highest_first set): do high-numbered word first. */
+
+ if (! highest_first)
+ output_asm_insn (singlemove_string (operands), operands);
+
+ /* Make any unoffsettable addresses point at high-numbered word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+
+ /* Do that word. */
+ output_asm_insn (singlemove_string (latehalf), latehalf);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x4,%0", &addreg0);
+ if (addreg1 && ! no_addreg1_decrement)
+ output_asm_insn ("add %0,-0x4,%0", &addreg1);
+
+ if (highest_first)
+ output_asm_insn (singlemove_string (operands), operands);
+
+ return "";
+}
+
+/* Output assembler code to perform a quadword move insn
+ with operands OPERANDS. This is very similar to the preceding
+ output_move_double function. */
+
+char *
+output_move_quad (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+ register enum optype optype0;
+ register enum optype optype1;
+ rtx wordpart[4][2];
+ rtx load_late[4];
+ int load_late_half[2];
+ rtx addreg0 = 0;
+ rtx addreg1 = 0;
+
+ load_late_half[0] = 0; load_late_half[1] = 0;
+ load_late[0] = 0; load_late[1] = 0; load_late[2] = 0;
+ load_late[3] = 0;
+
+ wordpart[0][0] = NULL; wordpart[1][0] = NULL; wordpart[2][0] = NULL;
+ wordpart[3][0] = NULL;
+
+ /* First classify both operands. */
+
+ if (REG_P (op0))
+ optype0 = REGOP;
+ else if (offsettable_memref_p (op0))
+ optype0 = OFFSOP;
+ else if (GET_CODE (op0) == MEM)
+ optype0 = MEMOP;
+ else
+ optype0 = RNDOP;
+
+ if (REG_P (op1))
+ optype1 = REGOP;
+ else if (CONSTANT_P (op1))
+ optype1 = CNSTOP;
+ else if (offsettable_memref_p (op1))
+ optype1 = OFFSOP;
+ else if (GET_CODE (op1) == MEM)
+ optype1 = MEMOP;
+ else
+ optype1 = RNDOP;
+
+ /* Check for the cases that the operand constraints are not
+ supposed to allow to happen. Abort if we get one,
+ because generating code for these cases is painful. */
+
+ if (optype0 == RNDOP || optype1 == RNDOP
+      || (optype0 == MEMOP && optype1 == MEMOP))
+ abort ();
+
+ if (optype0 == REGOP)
+ {
+ wordpart[0][0] = gen_rtx_REG (word_mode, REGNO (op0) + 0);
+ if (TARGET_ARCH64 && FP_REG_P (op0)
+ && REGNO (op0) < SPARC_FIRST_V9_FP_REG)
+ wordpart[1][0] = gen_rtx_REG (word_mode, REGNO (op0) + 2);
+ else
+ wordpart[1][0] = gen_rtx_REG (word_mode, REGNO (op0) + 1);
+
+ if (TARGET_ARCH32)
+ {
+ wordpart[2][0] = gen_rtx_REG (word_mode, REGNO (op0) + 2);
+ wordpart[3][0] = gen_rtx_REG (word_mode, REGNO (op0) + 3);
+ }
+
+ /* Loading into a register which overlaps a register used in the
+ address. */
+ if (optype1 != REGOP && reg_overlap_mentioned_p (op0, op1))
+ {
+ int i;
+ int count;
+
+ count = 0;
+
+ for (i = 0; i < 4 && wordpart[i][0] != NULL; i++)
+ {
+ if (reg_mentioned_p (wordpart[i][0], op1))
+ {
+ load_late[i] = wordpart[i][0];
+ load_late_half[TARGET_ARCH64 ? i : i/2] = 1;
+ count++;
+ }
+ }
+ if (count > 2)
+ {
+ /* Not sure what to do here. Multiple adds? Can't happen. */
+ abort ();
+ }
+ else if (count == 2)
+ {
+ /* We have a two-address source operand, and both registers
+ overlap with the dest quad. Add them together and
+ store the result into the last register of the quad being
+ loaded, then generate an appropriate MEM insn. */
+ rtx temp[3];
+ int place = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ if (load_late[i])
+ {
+ temp[place++] = load_late[i];
+ load_late[i] = 0;
+ }
+ }
+ temp[2] = wordpart[3][0];
+ output_asm_insn ("add %0, %1, %2", temp);
+ load_late_half[0] = 0;
+ load_late_half[1] = 1;
+ op1 = gen_rtx_MEM (TFmode, wordpart[3][0]);
+ operands[1] = op1;
+ optype1 = OFFSOP;
+ }
+ }
+ }
+
+ /* If an operand is an unoffsettable memory ref, find a register
+ we can increment temporarily to make it refer to the later words. */
+
+ if (optype0 == MEMOP)
+ addreg0 = find_addr_reg (XEXP (op0, 0));
+
+ if (optype1 == MEMOP)
+ addreg1 = find_addr_reg (XEXP (op1, 0));
+
+ /* Ok, we can do one word at a time.
+ Set up in wordpart the operands to use for each word of the arguments. */
+
+ if (optype0 == OFFSOP)
+ {
+ wordpart[0][0] = adj_offsettable_operand (op0, 0);
+ if (TARGET_ARCH32)
+ {
+ wordpart[1][0] = adj_offsettable_operand (op0, 4);
+ wordpart[2][0] = adj_offsettable_operand (op0, 8);
+ wordpart[3][0] = adj_offsettable_operand (op0, 12);
+ }
+ else
+ wordpart[1][0] = adj_offsettable_operand (op0, 8);
+ }
+ else if (optype0 != REGOP)
+ {
+ wordpart[0][0] = op0;
+ wordpart[1][0] = op0;
+ wordpart[2][0] = op0;
+ wordpart[3][0] = op0;
+ }
+
+ if (optype1 == REGOP)
+ {
+ wordpart[0][1] = gen_rtx_REG (word_mode, REGNO (op1) + 0);
+ if (TARGET_ARCH64 && FP_REG_P (op1)
+ && REGNO (op1) < SPARC_FIRST_V9_FP_REG)
+ wordpart[1][1] = gen_rtx_REG (word_mode, REGNO (op1) + 2);
+ else
+ wordpart[1][1] = gen_rtx_REG (word_mode, REGNO (op1) + 1);
+
+ if (TARGET_ARCH32)
+ {
+ wordpart[2][1] = gen_rtx_REG (word_mode, REGNO (op1) + 2);
+ wordpart[3][1] = gen_rtx_REG (word_mode, REGNO (op1) + 3);
+ }
+ }
+ else if (optype1 == OFFSOP)
+ {
+ wordpart[0][1] = adj_offsettable_operand (op1, 0);
+ if (TARGET_ARCH32)
+ {
+ wordpart[1][1] = adj_offsettable_operand (op1, 4);
+ wordpart[2][1] = adj_offsettable_operand (op1, 8);
+ wordpart[3][1] = adj_offsettable_operand (op1, 12);
+ }
+ else
+ wordpart[1][1] = adj_offsettable_operand (op1, 8);
+ }
+ else if (optype1 == CNSTOP)
+ {
+ REAL_VALUE_TYPE r;
+ long l[4];
+
+ /* This only works for TFmode floating point constants. */
+ if (GET_CODE (op1) != CONST_DOUBLE || GET_MODE (op1) != TFmode)
+ abort ();
+
+ REAL_VALUE_FROM_CONST_DOUBLE (r, op1);
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
+
+ wordpart[0][1] = GEN_INT (l[0]);
+ wordpart[1][1] = GEN_INT (l[1]);
+ wordpart[2][1] = GEN_INT (l[2]);
+ wordpart[3][1] = GEN_INT (l[3]);
+ }
+ else
+ {
+ wordpart[0][1] = op1;
+ wordpart[1][1] = op1;
+ wordpart[2][1] = op1;
+ wordpart[3][1] = op1;
+ }
+
+ /* Easy case: try moving the quad as two pairs. Check for moving between
+ an even/odd register pair and a memory location.
+ Also handle new v9 fp regs here. */
+ /* ??? Should also handle the case of non-offsettable addresses here.
+ We can at least do the first pair as a ldd/std, and then do the third
+ and fourth words individually. */
+ if ((optype0 == REGOP && optype1 == OFFSOP && (REGNO (op0) & 1) == 0)
+ || (optype0 == OFFSOP && optype1 == REGOP && (REGNO (op1) & 1) == 0))
+ {
+ rtx mem, reg;
+ int use_ldx;
+
+ if (optype0 == REGOP)
+ mem = op1, reg = op0;
+ else
+ mem = op0, reg = op1;
+
+ if (mem_aligned_8 (mem)
+ /* If this is a floating point register higher than %f31,
+ then we *must* use an aligned load, since `ld' will not accept
+ the register number. */
+ || (TARGET_V9 && REGNO (reg) >= SPARC_FIRST_V9_FP_REG))
+ {
+ static char * const mov_by_64[2][2][2] = {
+ { { "std %S1,%2;std %1,%0", "stx %R1,%2;stx %1,%0" },
+ { "ldd %2,%S0;ldd %1,%0", "ldx %2,%R0;ldx %1,%0" } },
+ { { "std %1,%0;std %S1,%2", "stx %1,%0;stx %R1,%2" },
+ { "ldd %1,%0;ldd %2,%S0", "ldx %1,%0;ldx %2,%R0" } }
+ };
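+
+	  /* Editorial note (not part of the original import): the table
+	     above is indexed as mov_by_64[low doubleword first?]
+	     [load (1) or store (0)][use ldx/stx (1) or ldd/std (0)];
+	     first-dimension index 0 moves the high doubleword (%S/%R)
+	     first, for when the low destination register overlaps the
+	     address register.  */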
+
+ if (TARGET_V9 && FP_REG_P (reg) && TARGET_HARD_QUAD)
+ {
+ /* Only abort if the register # requires that we use ldq. */
+ if ((REGNO (reg) & 3) == 0)
+ {
+ /* ??? Can `mem' have an inappropriate alignment here? */
+ return (mem == op1 ? "ldq %1,%0" : "stq %1,%0");
+ }
+ else
+ {
+ if (REGNO (reg) >= SPARC_FIRST_V9_FP_REG)
+		abort ();
+ }
+ }
+ operands[2] = adj_offsettable_operand (mem, 8);
+
+ /* Do the loads in the right order; can't overwrite our address
+ register. */
+ use_ldx = TARGET_ARCH64 && !FP_REG_P (reg);
+ return mov_by_64[!load_late_half[0]][mem == op1][use_ldx];
+ }
+ }
+
+ /* If the first move would clobber the source of the second one,
+ do them in the other order. */
+
+ /* Overlapping registers? */
+ if (TARGET_ARCH32)
+ {
+ if (optype0 == REGOP && optype1 == REGOP
+	  && (REGNO (op0) == REGNO (wordpart[3][1])
+	      || REGNO (op0) == REGNO (wordpart[2][1])
+	      || REGNO (op0) == REGNO (wordpart[1][1])))
+ {
+ /* Do fourth word. */
+ output_asm_insn (singlemove_string (wordpart[3]), wordpart[3]);
+ /* Do the third word. */
+ output_asm_insn (singlemove_string (wordpart[2]), wordpart[2]);
+ /* Do the second word. */
+ output_asm_insn (singlemove_string (wordpart[1]), wordpart[1]);
+ /* Do lowest-numbered word. */
+ output_asm_insn (singlemove_string (wordpart[0]), wordpart[0]);
+ return "";
+ }
+ }
+ else /* TARGET_ARCH64 */
+ {
+ if (optype0 == REGOP && optype1 == REGOP
+ && REGNO (op0) == REGNO (wordpart[1][1]))
+ {
+ output_asm_insn ("mov %1,%0", wordpart[1]);
+ output_asm_insn ("mov %1,%0", wordpart[0]);
+ return "";
+ }
+ }
+
+ /* Normal case: move the words in lowest to highest address order.
+     There may be an overlapping register; in that case, skip that word
+     and come back to it afterward.  */
+
+ if (TARGET_ARCH32)
+ {
+ int i;
+ int offset = 0xc;
+ rtx temp[2];
+
+ for (i = 0; i < 4; i++)
+ {
+ if (! load_late[i])
+ output_asm_insn (singlemove_string (wordpart[i]), wordpart[i]);
+
+ if (i != 3)
+ {
+ /* Make any unoffsettable addresses point at the next word. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x4,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x4,%0", &addreg1);
+ }
+ }
+ for (i = 0; i < 4; i++)
+ {
+ if (load_late[i])
+ {
+ int fix = offset - i * 4;
+
+ /* Back up to the appropriate place. */
+ temp[1] = GEN_INT (-fix);
+ if (addreg0)
+ {
+ temp[0] = addreg0;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ if (addreg1)
+ {
+ temp[0] = addreg1;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ output_asm_insn (singlemove_string (wordpart[i]),
+ wordpart[i]);
+ /* Don't modify the register that's the destination of the
+ move. */
+	      temp[1] = GEN_INT (-(offset - fix));
+	      if (addreg0 && REGNO (addreg0) != REGNO (wordpart[i][0]))
+		{
+		  temp[0] = addreg0;
+		  output_asm_insn ("add %0,%1,%0", temp);
+		}
+	      if (addreg1 && REGNO (addreg1) != REGNO (wordpart[i][0]))
+		{
+		  temp[0] = addreg1;
+		  output_asm_insn ("add %0,%1,%0", temp);
+		}
+ }
+ offset = 0;
+ break;
+ }
+ }
+ if (offset)
+ {
+ temp[1] = GEN_INT (-offset);
+ /* Undo the adds we just did. */
+ if (addreg0)
+ {
+ temp[0] = addreg0;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ if (addreg1)
+ {
+ temp[0] = addreg1;
+ output_asm_insn ("add %0,%1,%0", temp);
+ }
+ }
+ }
+ else /* TARGET_ARCH64 */
+ {
+ if (load_late_half[0])
+ {
+ /* Load the second half first. */
+ if (addreg0)
+ output_asm_insn ("add %0,0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x8,%0", &addreg1);
+
+ output_asm_insn (doublemove_string (wordpart[1]), wordpart[1]);
+
+ /* Undo the adds we just did. */
+ if (addreg0)
+ output_asm_insn ("add %0,-0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,-0x8,%0", &addreg1);
+
+ output_asm_insn (doublemove_string (wordpart[0]), wordpart[0]);
+ }
+ else
+ {
+ output_asm_insn (doublemove_string (wordpart[0]), wordpart[0]);
+
+ if (addreg0)
+ output_asm_insn ("add %0,0x8,%0", &addreg0);
+ if (addreg1)
+ output_asm_insn ("add %0,0x8,%0", &addreg1);
+
+ /* Do the second word. */
+ output_asm_insn (doublemove_string (wordpart[1]), wordpart[1]);
+
+ /* Undo the adds we just did. But don't modify the dest of
+ the move. */
+ if (addreg0 && REGNO (addreg0) != REGNO (wordpart[1][0]))
+ output_asm_insn ("add %0,-0x8,%0", &addreg0);
+ if (addreg1 && REGNO (addreg1) != REGNO (wordpart[1][0]))
+ output_asm_insn ("add %0,-0x8,%0", &addreg1);
+ }
+ }
+
+ return "";
+}
+
+/* Output assembler code to perform a doubleword move insn with operands
+ OPERANDS, one of which must be a floating point register. */
+
+char *
+output_fp_move_double (operands)
+ rtx *operands;
+{
+ if (FP_REG_P (operands[0]))
+ {
+ if (FP_REG_P (operands[1]))
+ {
+ if (TARGET_V9)
+ return "fmovd %1,%0";
+ else
+ return "fmovs %1,%0\n\tfmovs %R1,%R0";
+ }
+ else if (GET_CODE (operands[1]) == REG)
+ abort ();
+ else
+ return output_move_double (operands);
+ }
+ else if (FP_REG_P (operands[1]))
+ {
+ if (GET_CODE (operands[0]) == REG)
+ abort ();
+ else
+ return output_move_double (operands);
+ }
+ else abort ();
+}
+
+/* When doing a quad-register move, determine the direction in which
+ the move needs to be performed. SRC and DST are the source and
+ destination registers.
+
+   A value of -1 indicates that the move needs to be done from the
+   highest register to the lowest; a value of 1, from the lowest to the
+   highest.  */
+
+static int
+move_quad_direction (src, dst)
+ rtx src, dst;
+{
+ if ((REGNO (dst) > REGNO (src))
+ && (REGNO (dst) < (REGNO (src) + 4)))
+ return -1;
+ else
+ return 1;
+}
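+
+/* Editorial example (not part of the original import): for a quad copy
+   from %f0 (regno 32) to %f2 (regno 34), the destination overlaps the
+   upper half of the source, so the function returns -1 and the copy is
+   performed from the highest register down.  */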
+
+/* Output assembler code to perform a quadword move insn with operands
+ OPERANDS, one of which must be a floating point register. */
+
+char *
+output_fp_move_quad (operands)
+ rtx *operands;
+{
+ register rtx op0 = operands[0];
+ register rtx op1 = operands[1];
+
+ if (FP_REG_P (op0))
+ {
+ if (FP_REG_P (op1))
+ {
+ if (TARGET_V9 && TARGET_HARD_QUAD)
+ return "fmovq %1,%0";
+ else if (TARGET_V9)
+ {
+ int dir = move_quad_direction (op1, op0);
+ if (dir > 0)
+ return "fmovd %1,%0\n\tfmovd %S1,%S0";
+ else
+ return "fmovd %S1,%S0\n\tfmovd %1,%0";
+ }
+ else
+ {
+ int dir = move_quad_direction (op0, op1);
+ if (dir > 0)
+ return "fmovs %1,%0\n\tfmovs %R1,%R0\n\tfmovs %S1,%S0\n\tfmovs %T1,%T0";
+ else
+ return "fmovs %T1,%T0\n\tfmovs %S1,%S0\n\tfmovs %R1,%R0\n\tfmovs %1,%0";
+ }
+ }
+ else if (GET_CODE (op1) == REG)
+ abort ();
+ else
+ return output_move_quad (operands);
+ }
+ else if (FP_REG_P (op1))
+ {
+ if (GET_CODE (op0) == REG)
+ abort ();
+ else
+ return output_move_quad (operands);
+ }
+ else
+ abort ();
+}
+
+/* Return a REG that occurs in ADDR with coefficient 1.
+ ADDR can be effectively incremented by incrementing REG. */
+
+static rtx
+find_addr_reg (addr)
+ rtx addr;
+{
+ while (GET_CODE (addr) == PLUS)
+ {
+ /* We absolutely can not fudge the frame pointer here, because the
+ frame pointer must always be 8 byte aligned. It also confuses
+ debuggers. */
+ if (GET_CODE (XEXP (addr, 0)) == REG
+ && REGNO (XEXP (addr, 0)) != FRAME_POINTER_REGNUM)
+ addr = XEXP (addr, 0);
+ else if (GET_CODE (XEXP (addr, 1)) == REG
+ && REGNO (XEXP (addr, 1)) != FRAME_POINTER_REGNUM)
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 0)))
+ addr = XEXP (addr, 1);
+ else if (CONSTANT_P (XEXP (addr, 1)))
+ addr = XEXP (addr, 0);
+ else
+ abort ();
+ }
+ if (GET_CODE (addr) == REG)
+ return addr;
+ abort ();
+}
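+
+/* Editorial example (not part of the original import): for an address
+   of the form (plus (reg %o1) (const_int 8)), the constant term is
+   skipped and %o1 is returned; incrementing %o1 then effectively
+   advances the whole address.  */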
+
+/* Output reasonable peephole for set-on-condition-code insns.
+ Note that these insns assume a particular way of defining
+ labels. Therefore, *both* sparc.h and this function must
+ be changed if a new syntax is needed. */
+
+char *
+output_scc_insn (operands, insn)
+ rtx operands[];
+ rtx insn;
+{
+ static char string[100];
+ rtx label = 0, next = insn;
+ int need_label = 0;
+
+ /* This code used to be called with final_sequence nonzero (for fpcc
+ delay slots), but that is no longer allowed. */
+ if (final_sequence)
+ abort ();
+
+  /* On UltraSPARC a conditional move blocks until 3 cycles after prior loads
+ complete. It might be beneficial here to use branches if any recent
+ instructions were loads. */
+ if (TARGET_V9 && REGNO (operands[1]) == SPARC_ICC_REG)
+ return "mov 0,%0\n\tmov%C2 %x1,1,%0";
+
+ /* Try doing a jump optimization which jump.c can't do for us
+ because we did not expose that setcc works by using branches.
+
+ If this scc insn is followed by an unconditional branch, then have
+ the jump insn emitted here jump to that location, instead of to
+ the end of the scc sequence as usual. */
+
+ do
+ {
+ if (GET_CODE (next) == CODE_LABEL)
+ label = next;
+ next = NEXT_INSN (next);
+ }
+ while (next && (GET_CODE (next) == NOTE || GET_CODE (next) == CODE_LABEL));
+
+ if (next && GET_CODE (next) == JUMP_INSN && simplejump_p (next))
+ label = JUMP_LABEL (next);
+
+ /* If not optimizing, jump label fields are not set. To be safe, always
+     check here whether label is still zero.  */
+ if (label == 0)
+ {
+ label = gen_label_rtx ();
+ need_label = 1;
+ }
+
+ LABEL_NUSES (label) += 1;
+
+ /* operands[3] is an unused slot. */
+ operands[3] = label;
+
+ strcpy (string, output_cbranch (operands[2], 3, 0, 1, 0, 0));
+ strcat (string, "\n\tmov 1,%0\n\tmov 0,%0");
+
+ if (need_label)
+ strcat (string, "\n%l3:");
+
+ return string;
+}
+
+/* Vectors to keep interesting information about registers where it can easily
+   be found.  We used to use the actual mode value as the bit number, but there
+ are more than 32 modes now. Instead we use two tables: one indexed by
+ hard register number, and one indexed by mode. */
+
+/* The purpose of sparc_mode_class is to shrink the range of modes so that
+ they all fit (as bit numbers) in a 32 bit word (again). Each real mode is
+ mapped into one sparc_mode_class mode. */
+
+enum sparc_mode_class {
+ S_MODE, D_MODE, T_MODE, O_MODE,
+ SF_MODE, DF_MODE, TF_MODE, OF_MODE,
+ CC_MODE, CCFP_MODE
+};
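+
+/* Editorial sketch, not part of the original import: the intended
+   consumer of the two tables (an assumption based on the comment above)
+   tests a register/mode pair by intersecting the two bitmasks, roughly:  */
+#if 0
+#define SPARC_REGNO_MODE_OK_SKETCH(REGNO, MODE) \
+  ((hard_regno_mode_classes[(REGNO)] & sparc_mode_class[(MODE)]) != 0)
+#endif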
+
+/* Modes for single-word and smaller quantities. */
+#define S_MODES ((1 << (int) S_MODE) | (1 << (int) SF_MODE))
+
+/* Modes for double-word and smaller quantities. */
+#define D_MODES (S_MODES | (1 << (int) D_MODE) | (1 << (int) DF_MODE))
+
+/* Modes for quad-word and smaller quantities. */
+#define T_MODES (D_MODES | (1 << (int) T_MODE) | (1 << (int) TF_MODE))
+
+/* Modes for single-float quantities. We must allow any single word or
+ smaller quantity. This is because the fix/float conversion instructions
+ take integer inputs/outputs from the float registers. */
+#define SF_MODES (S_MODES)
+
+/* Modes for double-float and smaller quantities. */
+#define DF_MODES (S_MODES | D_MODES)
+
+#define DF_MODES64 DF_MODES
+
+/* Modes for double-float only quantities. */
+#define DF_ONLY_MODES ((1 << (int) DF_MODE) | (1 << (int) D_MODE))
+
+/* Modes for double-float and larger quantities. */
+#define DF_UP_MODES (DF_ONLY_MODES | TF_ONLY_MODES)
+
+/* Modes for quad-float only quantities. */
+#define TF_ONLY_MODES (1 << (int) TF_MODE)
+
+/* Modes for quad-float and smaller quantities. */
+#define TF_MODES (DF_MODES | TF_ONLY_MODES)
+
+#define TF_MODES64 (DF_MODES64 | TF_ONLY_MODES)
+
+/* Modes for condition codes. */
+#define CC_MODES (1 << (int) CC_MODE)
+#define CCFP_MODES (1 << (int) CCFP_MODE)
+
+/* Value is 1 if register/mode pair is acceptable on sparc.
+ The funny mixture of D and T modes is because integer operations
+ do not specially operate on tetra quantities, so non-quad-aligned
+ registers can hold quadword quantities (except %o4 and %i4 because
+ they cross fixed registers). */
+
+/* This points to either the 32 bit or the 64 bit version. */
+int *hard_regno_mode_classes;
+
+static int hard_32bit_mode_classes[] = {
+ S_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES,
+ T_MODES, S_MODES, T_MODES, S_MODES, D_MODES, S_MODES, D_MODES, S_MODES,
+
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+ TF_MODES, SF_MODES, DF_MODES, SF_MODES, TF_MODES, SF_MODES, DF_MODES, SF_MODES,
+
+ /* FP regs f32 to f63. Only the even numbered registers actually exist,
+ and none can hold SFmode/SImode values. */
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+
+ /* %fcc[0123] */
+ CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
+
+ /* %icc */
+ CC_MODES
+};
+
+static int hard_64bit_mode_classes[] = {
+ D_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+ T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES, T_MODES, D_MODES,
+
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+ TF_MODES64, SF_MODES, DF_MODES64, SF_MODES, TF_MODES64, SF_MODES, DF_MODES64, SF_MODES,
+
+ /* FP regs f32 to f63. Only the even numbered registers actually exist,
+ and none can hold SFmode/SImode values. */
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+ DF_UP_MODES, 0, DF_ONLY_MODES, 0, DF_UP_MODES, 0, DF_ONLY_MODES, 0,
+
+ /* %fcc[0123] */
+ CCFP_MODES, CCFP_MODES, CCFP_MODES, CCFP_MODES,
+
+ /* %icc */
+ CC_MODES
+};
+
+int sparc_mode_class [NUM_MACHINE_MODES];
+
+enum reg_class sparc_regno_reg_class[FIRST_PSEUDO_REGISTER];
+
+static void
+sparc_init_modes ()
+{
+ int i;
+
+ for (i = 0; i < NUM_MACHINE_MODES; i++)
+ {
+ switch (GET_MODE_CLASS (i))
+ {
+ case MODE_INT:
+ case MODE_PARTIAL_INT:
+ case MODE_COMPLEX_INT:
+ if (GET_MODE_SIZE (i) <= 4)
+ sparc_mode_class[i] = 1 << (int) S_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ sparc_mode_class[i] = 1 << (int) D_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ sparc_mode_class[i] = 1 << (int) T_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ sparc_mode_class[i] = 1 << (int) O_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ case MODE_FLOAT:
+ case MODE_COMPLEX_FLOAT:
+ if (GET_MODE_SIZE (i) <= 4)
+ sparc_mode_class[i] = 1 << (int) SF_MODE;
+ else if (GET_MODE_SIZE (i) == 8)
+ sparc_mode_class[i] = 1 << (int) DF_MODE;
+ else if (GET_MODE_SIZE (i) == 16)
+ sparc_mode_class[i] = 1 << (int) TF_MODE;
+ else if (GET_MODE_SIZE (i) == 32)
+ sparc_mode_class[i] = 1 << (int) OF_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ case MODE_CC:
+ default:
+ /* mode_class hasn't been initialized yet for EXTRA_CC_MODES, so
+ we must explicitly check for them here. */
+ if (i == (int) CCFPmode || i == (int) CCFPEmode)
+ sparc_mode_class[i] = 1 << (int) CCFP_MODE;
+ else if (i == (int) CCmode || i == (int) CC_NOOVmode
+ || i == (int) CCXmode || i == (int) CCX_NOOVmode)
+ sparc_mode_class[i] = 1 << (int) CC_MODE;
+ else
+ sparc_mode_class[i] = 0;
+ break;
+ }
+ }
+
+ if (TARGET_ARCH64)
+ hard_regno_mode_classes = hard_64bit_mode_classes;
+ else
+ hard_regno_mode_classes = hard_32bit_mode_classes;
+
+ /* Initialize the array used by REGNO_REG_CLASS. */
+ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ {
+ if (i < 16 && TARGET_V8PLUS)
+ sparc_regno_reg_class[i] = I64_REGS;
+ else if (i < 32)
+ sparc_regno_reg_class[i] = GENERAL_REGS;
+ else if (i < 64)
+ sparc_regno_reg_class[i] = FP_REGS;
+ else if (i < 96)
+ sparc_regno_reg_class[i] = EXTRA_FP_REGS;
+ else if (i < 100)
+ sparc_regno_reg_class[i] = FPCC_REGS;
+ else
+ sparc_regno_reg_class[i] = NO_REGS;
+ }
+}
+
+/* Save non call used registers from LOW to HIGH at BASE+OFFSET.
+ N_REGS is the number of 4-byte regs saved thus far. This applies even to
+ v9 int regs as it simplifies the code. */
+
+static int
+save_regs (file, low, high, base, offset, n_regs, real_offset)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_regs;
+ int real_offset;
+{
+ int i;
+
+ if (TARGET_ARCH64 && high <= 32)
+ {
+ for (i = low; i < high; i++)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ {
+ fprintf (file, "\tstx %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
+ n_regs += 2;
+ }
+ }
+ }
+ else
+ {
+ for (i = low; i < high; i += 2)
+ {
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ {
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ {
+ fprintf (file, "\tstd %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ dwarf2out_reg_save (l, i, real_offset + 4 * n_regs);
+ dwarf2out_reg_save (l, i+1, real_offset + 4 * n_regs + 4);
+ }
+ n_regs += 2;
+ }
+ else
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i], base, offset + 4 * n_regs);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i, real_offset + 4 * n_regs);
+ n_regs += 2;
+ }
+ }
+ else
+ {
+ if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[i+1], base, offset + 4 * n_regs + 4);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", i + 1, real_offset + 4 * n_regs + 4);
+ n_regs += 2;
+ }
+ }
+ }
+ }
+ return n_regs;
+}
+
+/* Restore non call used registers from LOW to HIGH at BASE+OFFSET.
+
+ N_REGS is the number of 4-byte regs saved thus far. This applies even to
+ v9 int regs as it simplifies the code. */
+
+static int
+restore_regs (file, low, high, base, offset, n_regs)
+ FILE *file;
+ int low, high;
+ char *base;
+ int offset;
+ int n_regs;
+{
+ int i;
+
+ if (TARGET_ARCH64 && high <= 32)
+ {
+ for (i = low; i < high; i++)
+ {
+	  if (regs_ever_live[i] && ! call_used_regs[i])
+	    {
+	      fprintf (file, "\tldx [%s+%d], %s\n",
+		       base, offset + 4 * n_regs, reg_names[i]);
+	      n_regs += 2;
+	    }
+ }
+ }
+ else
+ {
+ for (i = low; i < high; i += 2)
+ {
+	  if (regs_ever_live[i] && ! call_used_regs[i])
+	    {
+	      if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+		fprintf (file, "\tldd [%s+%d], %s\n",
+			 base, offset + 4 * n_regs, reg_names[i]);
+	      else
+		fprintf (file, "\tld [%s+%d],%s\n",
+			 base, offset + 4 * n_regs, reg_names[i]);
+	      n_regs += 2;
+	    }
+	  else if (regs_ever_live[i+1] && ! call_used_regs[i+1])
+	    {
+	      fprintf (file, "\tld [%s+%d],%s\n",
+		       base, offset + 4 * n_regs + 4, reg_names[i+1]);
+	      n_regs += 2;
+	    }
+ }
+ }
+ return n_regs;
+}
+
+/* Static variables we want to share between prologue and epilogue. */
+
+/* Number of live general or floating point registers that need to be
+   saved (as 4-byte quantities).  This is only done if TARGET_EPILOGUE.  */
+static int num_gfregs;
+
+/* Compute the frame size required by the function. This function is called
+ during the reload pass and also by output_function_prologue(). */
+
+int
+compute_frame_size (size, leaf_function)
+ int size;
+ int leaf_function;
+{
+ int n_regs = 0, i;
+ int outgoing_args_size = (current_function_outgoing_args_size
+ + REG_PARM_STACK_SPACE (current_function_decl));
+
+ if (TARGET_EPILOGUE)
+ {
+ /* N_REGS is the number of 4-byte regs saved thus far. This applies
+ even to v9 int regs to be consistent with save_regs/restore_regs. */
+
+ if (TARGET_ARCH64)
+ {
+ for (i = 0; i < 8; i++)
+ if (regs_ever_live[i] && ! call_used_regs[i])
+ n_regs += 2;
+ }
+ else
+ {
+ for (i = 0; i < 8; i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_regs += 2;
+ }
+
+ for (i = 32; i < (TARGET_V9 ? 96 : 64); i += 2)
+ if ((regs_ever_live[i] && ! call_used_regs[i])
+ || (regs_ever_live[i+1] && ! call_used_regs[i+1]))
+ n_regs += 2;
+ }
+
+ /* Set up values for use in `function_epilogue'. */
+ num_gfregs = n_regs;
+
+ if (leaf_function && n_regs == 0
+ && size == 0 && current_function_outgoing_args_size == 0)
+ {
+ actual_fsize = apparent_fsize = 0;
+ }
+ else
+ {
+ /* We subtract STARTING_FRAME_OFFSET, remember it's negative.
+ The stack bias (if any) is taken out to undo its effects. */
+ apparent_fsize = (size - STARTING_FRAME_OFFSET + SPARC_STACK_BIAS + 7) & -8;
+ apparent_fsize += n_regs * 4;
+ actual_fsize = apparent_fsize + ((outgoing_args_size + 7) & -8);
+ }
+
+ /* Make sure nothing can clobber our register windows.
+ If a SAVE must be done, or there is a stack-local variable,
+ the register window area must be allocated.
+ ??? For v8 we apparently need an additional 8 bytes of reserved space. */
+ if (leaf_function == 0 || size > 0)
+ actual_fsize += (16 * UNITS_PER_WORD) + (TARGET_ARCH64 ? 0 : 8);
+
+ return SPARC_STACK_ALIGN (actual_fsize);
+}
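+
+/* Editorial note (not part of the original import): "(x + 7) & -8"
+   rounds x up to a multiple of 8, e.g. (20 + 7) & -8 == 24; this keeps
+   both apparent_fsize and the outgoing argument area doubleword
+   aligned.  */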
+
+/* Build a (32 bit) big number in a register. */
+/* ??? We may be able to use the set macro here too. */
+
+static void
+build_big_number (file, num, reg)
+ FILE *file;
+ int num;
+ char *reg;
+{
+ if (num >= 0 || ! TARGET_ARCH64)
+ {
+ fprintf (file, "\tsethi %%hi(%d),%s\n", num, reg);
+ if ((num & 0x3ff) != 0)
+ fprintf (file, "\tor %s,%%lo(%d),%s\n", reg, num, reg);
+ }
+ else /* num < 0 && TARGET_ARCH64 */
+ {
+ /* Sethi does not sign extend, so we must use a little trickery
+ to use it for negative numbers. Invert the constant before
+ loading it in, then use xor immediate to invert the loaded bits
+ (along with the upper 32 bits) to the desired constant. This
+ works because the sethi and immediate fields overlap. */
+ int asize = num;
+ int inv = ~asize;
+ int low = -0x400 + (asize & 0x3FF);
+
+ fprintf (file, "\tsethi %%hi(%d),%s\n\txor %s,%d,%s\n",
+ inv, reg, reg, low, reg);
+ }
+}
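+
+/* Editorial sketch, not part of the original import: a host-side check
+   of the sethi/xor trick above for one sample constant.  The use of
+   "long long" and assert() is an assumption of the sketch, not of this
+   file.  */
+#if 0
+#include <assert.h>
+
+static void
+check_sethi_xor_trick (void)
+{
+  long long num = -0x12345678LL;	/* Negative, so the trick applies.  */
+  long long inv = ~num;
+  long long low = -0x400 + (num & 0x3ff);
+
+  /* sethi %hi(inv),%reg zero extends (inv & ~0x3ff) to 64 bits...  */
+  long long reg = (inv & ~(long long) 0x3ff) & 0xffffffffLL;
+
+  /* ...and xor %reg,low,%reg applies the sign extended 13 bit immediate,
+     inverting both the loaded bits and the upper 32 bits.  */
+  reg ^= low;
+
+  assert (reg == num);
+}
+#endif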
+
+/* Output code for the function prologue. */
+
+void
+output_function_prologue (file, size, leaf_function)
+ FILE *file;
+ int size;
+ int leaf_function;
+{
+ /* Need to use actual_fsize, since we are also allocating
+ space for our callee (and our own register save area). */
+ actual_fsize = compute_frame_size (size, leaf_function);
+
+ if (leaf_function)
+ {
+ frame_base_name = "%sp";
+ frame_base_offset = actual_fsize + SPARC_STACK_BIAS;
+ }
+ else
+ {
+ frame_base_name = "%fp";
+ frame_base_offset = SPARC_STACK_BIAS;
+ }
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
+
+ if (actual_fsize == 0)
+ /* do nothing. */ ;
+ else if (! leaf_function && ! TARGET_BROKEN_SAVERESTORE)
+ {
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tsave %%sp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tsave %%sp,-4096,%%sp\n");
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tsave %%sp,%%g1,%%sp\n");
+ }
+ }
+ else if (! leaf_function && TARGET_BROKEN_SAVERESTORE)
+ {
+ /* We assume the environment will properly handle or otherwise avoid
+	 trouble associated with an interrupt occurring after the `save' or
+	 a trap occurring during it.  */
+ fprintf (file, "\tsave\n");
+
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tadd %%fp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tadd %%fp,-4096,%%sp\n");
+	  fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tadd %%fp,%%g1,%%sp\n");
+ }
+ }
+ else /* leaf function */
+ {
+ if (actual_fsize <= 4096)
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize);
+ else if (actual_fsize <= 8192)
+ {
+ fprintf (file, "\tadd %%sp,-4096,%%sp\n");
+ fprintf (file, "\tadd %%sp,-%d,%%sp\n", actual_fsize - 4096);
+ }
+ else
+ {
+ build_big_number (file, -actual_fsize, "%g1");
+ fprintf (file, "\tadd %%sp,%%g1,%%sp\n");
+ }
+ }
+
+ if (dwarf2out_do_frame () && actual_fsize)
+ {
+ char *label = dwarf2out_cfi_label ();
+
+ /* The canonical frame address refers to the top of the frame. */
+ dwarf2out_def_cfa (label, (leaf_function ? STACK_POINTER_REGNUM
+ : FRAME_POINTER_REGNUM),
+ frame_base_offset);
+
+ if (! leaf_function)
+ {
+ /* Note the register window save. This tells the unwinder that
+ it needs to restore the window registers from the previous
+ frame's window save area at 0(cfa). */
+ dwarf2out_window_save (label);
+
+ /* The return address (-8) is now in %i7. */
+ dwarf2out_return_reg (label, 31);
+ }
+ }
+
+ /* If doing anything with PIC, do it now. */
+ if (! flag_pic)
+ fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
+
+ /* Call saved registers are saved just above the outgoing argument area. */
+ if (num_gfregs)
+ {
+ int offset, real_offset, n_regs;
+ char *base;
+
+ real_offset = -apparent_fsize;
+ offset = -apparent_fsize + frame_base_offset;
+ if (offset < -4096 || offset + num_gfregs * 4 > 4096)
+ {
+ /* ??? This might be optimized a little as %g1 might already have a
+ value close enough that a single add insn will do. */
+ /* ??? Although, all of this is probably only a temporary fix
+ because if %g1 can hold a function result, then
+ output_function_epilogue will lose (the result will get
+ clobbered). */
+ build_big_number (file, offset, "%g1");
+ fprintf (file, "\tadd %s,%%g1,%%g1\n", frame_base_name);
+ base = "%g1";
+ offset = 0;
+ }
+ else
+ {
+ base = frame_base_name;
+ }
+
+ n_regs = 0;
+ if (TARGET_EPILOGUE && ! leaf_function)
+ /* ??? Originally saved regs 0-15 here. */
+ n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
+ else if (leaf_function)
+ /* ??? Originally saved regs 0-31 here. */
+ n_regs = save_regs (file, 0, 8, base, offset, 0, real_offset);
+ if (TARGET_EPILOGUE)
+ save_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs,
+ real_offset);
+ }
+
+ leaf_label = 0;
+ if (leaf_function && actual_fsize != 0)
+ {
+ /* warning ("leaf procedure with frame size %d", actual_fsize); */
+ if (! TARGET_EPILOGUE)
+ leaf_label = gen_label_rtx ();
+ }
+}
+
+/* Output code for the function epilogue. */
+
+void
+output_function_epilogue (file, size, leaf_function)
+ FILE *file;
+ int size ATTRIBUTE_UNUSED;
+ int leaf_function;
+{
+ char *ret;
+
+ if (leaf_label)
+ {
+ emit_label_after (leaf_label, get_last_insn ());
+ final_scan_insn (get_last_insn (), file, 0, 0, 1);
+ }
+
+#ifdef FUNCTION_BLOCK_PROFILER_EXIT
+ else if (profile_block_flag == 2)
+ {
+ FUNCTION_BLOCK_PROFILER_EXIT(file);
+ }
+#endif
+
+ else if (current_function_epilogue_delay_list == 0)
+ {
+ /* If code does not drop into the epilogue, do nothing. */
+ rtx insn = get_last_insn ();
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == BARRIER)
+ return;
+ }
+
+ /* Restore any call saved registers. */
+ if (num_gfregs)
+ {
+ int offset, n_regs;
+ char *base;
+
+ offset = -apparent_fsize + frame_base_offset;
+ if (offset < -4096 || offset + num_gfregs * 4 > 4096 - 8 /*double*/)
+ {
+ build_big_number (file, offset, "%g1");
+ fprintf (file, "\tadd %s,%%g1,%%g1\n", frame_base_name);
+ base = "%g1";
+ offset = 0;
+ }
+ else
+ {
+ base = frame_base_name;
+ }
+
+ n_regs = 0;
+ if (TARGET_EPILOGUE && ! leaf_function)
+ /* ??? Originally saved regs 0-15 here. */
+ n_regs = restore_regs (file, 0, 8, base, offset, 0);
+ else if (leaf_function)
+ /* ??? Originally saved regs 0-31 here. */
+ n_regs = restore_regs (file, 0, 8, base, offset, 0);
+ if (TARGET_EPILOGUE)
+ restore_regs (file, 32, TARGET_V9 ? 96 : 64, base, offset, n_regs);
+ }
+
+ /* Work out how to skip the caller's unimp instruction if required. */
+ if (leaf_function)
+ ret = (SKIP_CALLERS_UNIMP_P ? "jmp %o7+12" : "retl");
+ else
+ ret = (SKIP_CALLERS_UNIMP_P ? "jmp %i7+12" : "ret");
+
+ if (TARGET_EPILOGUE || leaf_label)
+ {
+ int old_target_epilogue = TARGET_EPILOGUE;
+ target_flags &= ~old_target_epilogue;
+
+ if (! leaf_function)
+ {
+ /* If we wound up with things in our delay slot, flush them here. */
+ if (current_function_epilogue_delay_list)
+ {
+ rtx insn = emit_jump_insn_after (gen_rtx_RETURN (VOIDmode),
+ get_last_insn ());
+ PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ PATTERN (XEXP (current_function_epilogue_delay_list, 0)),
+ PATTERN (insn)));
+ final_scan_insn (insn, file, 1, 0, 1);
+ }
+ else if (TARGET_V9 && ! SKIP_CALLERS_UNIMP_P)
+ fputs ("\treturn %i7+8\n\tnop\n", file);
+ else
+ fprintf (file, "\t%s\n\trestore\n", ret);
+ }
+ /* All of the following cases are for leaf functions. */
+ else if (current_function_epilogue_delay_list)
+ {
+ /* eligible_for_epilogue_delay_slot ensures that if this is a
+	     leaf function, then we will only have an insn in the delay
+	     slot if the frame size is zero, so no stack adjustment is
+	     needed here.  */
+ if (actual_fsize != 0)
+ abort ();
+ fprintf (file, "\t%s\n", ret);
+ final_scan_insn (XEXP (current_function_epilogue_delay_list, 0),
+ file, 1, 0, 1);
+ }
+ /* Output 'nop' instead of 'sub %sp,-0,%sp' when no frame, so as to
+ avoid generating confusing assembly language output. */
+ else if (actual_fsize == 0)
+ fprintf (file, "\t%s\n\tnop\n", ret);
+ else if (actual_fsize <= 4096)
+ fprintf (file, "\t%s\n\tsub %%sp,-%d,%%sp\n", ret, actual_fsize);
+ else if (actual_fsize <= 8192)
+ fprintf (file, "\tsub %%sp,-4096,%%sp\n\t%s\n\tsub %%sp,-%d,%%sp\n",
+ ret, actual_fsize - 4096);
+ else if ((actual_fsize & 0x3ff) == 0)
+ fprintf (file, "\tsethi %%hi(%d),%%g1\n\t%s\n\tadd %%sp,%%g1,%%sp\n",
+ actual_fsize, ret);
+ else
+ fprintf (file, "\tsethi %%hi(%d),%%g1\n\tor %%g1,%%lo(%d),%%g1\n\t%s\n\tadd %%sp,%%g1,%%sp\n",
+ actual_fsize, actual_fsize, ret);
+ target_flags |= old_target_epilogue;
+ }
+}
+
+/* Functions for handling argument passing.
+
+ For v8 the first six args are normally in registers and the rest are
+ pushed. Any arg that starts within the first 6 words is at least
+ partially passed in a register unless its data type forbids.
+   partially passed in a register unless its data type forbids it.
+ For v9, the argument registers are laid out as an array of 16 elements
+ and arguments are added sequentially. The first 6 int args and up to the
+ first 16 fp args (depending on size) are passed in regs.
+
+ Slot Stack Integral Float Float in structure Double Long Double
+ ---- ----- -------- ----- ------------------ ------ -----------
+ 15 [SP+248] %f31 %f30,%f31 %d30
+ 14 [SP+240] %f29 %f28,%f29 %d28 %q28
+ 13 [SP+232] %f27 %f26,%f27 %d26
+ 12 [SP+224] %f25 %f24,%f25 %d24 %q24
+ 11 [SP+216] %f23 %f22,%f23 %d22
+ 10 [SP+208] %f21 %f20,%f21 %d20 %q20
+ 9 [SP+200] %f19 %f18,%f19 %d18
+ 8 [SP+192] %f17 %f16,%f17 %d16 %q16
+ 7 [SP+184] %f15 %f14,%f15 %d14
+ 6 [SP+176] %f13 %f12,%f13 %d12 %q12
+ 5 [SP+168] %o5 %f11 %f10,%f11 %d10
+ 4 [SP+160] %o4 %f9 %f8,%f9 %d8 %q8
+ 3 [SP+152] %o3 %f7 %f6,%f7 %d6
+ 2 [SP+144] %o2 %f5 %f4,%f5 %d4 %q4
+ 1 [SP+136] %o1 %f3 %f2,%f3 %d2
+ 0 [SP+128] %o0 %f1 %f0,%f1 %d0 %q0
+
+ Here SP = %sp if -mno-stack-bias or %sp+stack_bias otherwise.
+
+ Integral arguments are always passed as 64 bit quantities appropriately
+ extended.
+
+ Passing of floating point values is handled as follows.
+ If a prototype is in scope:
+ If the value is in a named argument (i.e. not a stdarg function or a
+ value not part of the `...') then the value is passed in the appropriate
+ fp reg.
+ If the value is part of the `...' and is passed in one of the first 6
+ slots then the value is passed in the appropriate int reg.
+ If the value is part of the `...' and is not passed in one of the first 6
+ slots then the value is passed in memory.
+ If a prototype is not in scope:
+ If the value is one of the first 6 arguments the value is passed in the
+ appropriate integer reg and the appropriate fp reg.
+ If the value is not one of the first 6 arguments the value is passed in
+ the appropriate fp reg and in memory.
+ */
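+
+/* Editorial example (derived from the table above; not part of the
+   original import): for a prototyped v9 call such as
+
+	extern void f (int a, double b, long double c);
+
+   a is passed in %o0 (slot 0), b in %d2 (slot 1), and c, which must
+   start on an even slot, in %q4 (slots 2 and 3).  */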
+
+/* Maximum number of int regs for args. */
+#define SPARC_INT_ARG_MAX 6
+/* Maximum number of fp regs for args. */
+#define SPARC_FP_ARG_MAX 16
+
+#define ROUND_ADVANCE(SIZE) (((SIZE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Handle the INIT_CUMULATIVE_ARGS macro.
+ Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+void
+init_cumulative_args (cum, fntype, libname, indirect)
+ CUMULATIVE_ARGS *cum;
+ tree fntype;
+ tree libname ATTRIBUTE_UNUSED;
+ int indirect ATTRIBUTE_UNUSED;
+{
+ cum->words = 0;
+ cum->prototype_p = fntype && TYPE_ARG_TYPES (fntype);
+ cum->libcall_p = fntype == 0;
+}
+
+/* Compute the slot number to pass an argument in.
+ Returns the slot number or -1 if passing on the stack.
+
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+ INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG.
+ *PREGNO records the register number to use if scalar type.
+ *PPADDING records the amount of padding needed in words. */
+
+static int
+function_arg_slotno (cum, mode, type, named, incoming_p, pregno, ppadding)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+ int incoming_p;
+ int *pregno;
+ int *ppadding;
+{
+ int regbase = (incoming_p
+ ? SPARC_INCOMING_INT_ARG_FIRST
+ : SPARC_OUTGOING_INT_ARG_FIRST);
+ int slotno = cum->words;
+ int regno;
+
+ *ppadding = 0;
+
+ if (type != 0 && TREE_ADDRESSABLE (type))
+ return -1;
+ if (TARGET_ARCH32
+ && type != 0 && mode == BLKmode
+ && TYPE_ALIGN (type) % PARM_BOUNDARY != 0)
+ return -1;
+
+ switch (mode)
+ {
+ case VOIDmode :
+ /* MODE is VOIDmode when generating the actual call.
+ See emit_call_1. */
+ return -1;
+
+ case QImode : case CQImode :
+ case HImode : case CHImode :
+ case SImode : case CSImode :
+ case DImode : case CDImode :
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ break;
+
+ case SFmode : case SCmode :
+ case DFmode : case DCmode :
+ case TFmode : case TCmode :
+ if (TARGET_ARCH32)
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ else
+ {
+ if ((mode == TFmode || mode == TCmode)
+ && (slotno & 1) != 0)
+ slotno++, *ppadding = 1;
+ if (TARGET_FPU && named)
+ {
+ if (slotno >= SPARC_FP_ARG_MAX)
+ return -1;
+ regno = SPARC_FP_ARG_FIRST + slotno * 2;
+ if (mode == SFmode)
+ regno++;
+ }
+ else
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ }
+ break;
+
+ case BLKmode :
+ /* For sparc64, objects requiring 16 byte alignment get it. */
+ if (TARGET_ARCH64)
+ {
+ if (type && TYPE_ALIGN (type) == 128 && (slotno & 1) != 0)
+ slotno++, *ppadding = 1;
+ }
+
+ if (TARGET_ARCH32
+ || (type && TREE_CODE (type) == UNION_TYPE))
+ {
+ if (slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ regno = regbase + slotno;
+ }
+ else
+ {
+ tree field;
+ int intregs_p = 0, fpregs_p = 0;
+ /* The ABI obviously doesn't specify how packed
+ structures are passed. These are defined to be passed
+ in int regs if possible, otherwise memory. */
+ int packed_p = 0;
+
+ /* First see what kinds of registers we need. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU)
+ fpregs_p = 1;
+ else
+ intregs_p = 1;
+ if (DECL_PACKED (field))
+ packed_p = 1;
+ }
+ }
+ if (packed_p || !named)
+ fpregs_p = 0, intregs_p = 1;
+
+ /* If all arg slots are filled, then must pass on stack. */
+ if (fpregs_p && slotno >= SPARC_FP_ARG_MAX)
+ return -1;
+ /* If there are only int args and all int arg slots are filled,
+ then must pass on stack. */
+ if (!fpregs_p && intregs_p && slotno >= SPARC_INT_ARG_MAX)
+ return -1;
+ /* Note that even if all int arg slots are filled, fp members may
+ still be passed in regs if such regs are available.
+ *PREGNO isn't set because there may be more than one, it's up
+ to the caller to compute them. */
+ return slotno;
+ }
+ break;
+
+ default :
+ abort ();
+ }
+
+ *pregno = regno;
+ return slotno;
+}
+
+/* Handle recursive register counting for structure field layout. */
+
+struct function_arg_record_value_parms
+{
+ rtx ret;
+ int slotno, named, regbase;
+ int nregs, intoffset;
+};
+
+static void
+function_arg_record_value_1 (type, startbitpos, parms)
+ tree type;
+ int startbitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ tree field;
+
+ /* The ABI obviously doesn't specify how packed structures are
+ passed. These are defined to be passed in int regs if possible,
+ otherwise memory. */
+ int packed_p = 0;
+
+ /* We need to compute how many registers are needed so we can
+ allocate the PARALLEL but before we can do that we need to know
+ whether there are any packed fields. If there are, int regs are
+ used regardless of whether there are fp values present. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
+ {
+ packed_p = 1;
+ break;
+ }
+ }
+
+ /* Compute how many registers we need. */
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int bitpos = startbitpos;
+ if (DECL_FIELD_BITPOS (field))
+ bitpos += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
+ {
+ function_arg_record_value_1 (TREE_TYPE (field), bitpos, parms);
+ }
+ else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU
+ && ! packed_p
+ && parms->named)
+ {
+ if (parms->intoffset != -1)
+ {
+ int intslots, this_slotno;
+
+ intslots = (bitpos - parms->intoffset + BITS_PER_WORD - 1)
+ / BITS_PER_WORD;
+ this_slotno = parms->slotno + parms->intoffset
+ / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ intslots = MAX (intslots, 0);
+ parms->nregs += intslots;
+ parms->intoffset = -1;
+ }
+
+	      /* There's no need to check this_slotno < SPARC_FP_ARG_MAX.
+ If it wasn't true we wouldn't be here. */
+ parms->nregs += 1;
+ }
+ else
+ {
+ if (parms->intoffset == -1)
+ parms->intoffset = bitpos;
+ }
+ }
+ }
+}
+
+/* Handle recursive structure field register assignment. */
+
+static void
+function_arg_record_value_3 (bitpos, parms)
+ int bitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ enum machine_mode mode;
+ int regno, this_slotno, intslots, intoffset;
+ rtx reg;
+
+ if (parms->intoffset == -1)
+ return;
+ intoffset = parms->intoffset;
+ parms->intoffset = -1;
+
+ intslots = (bitpos - intoffset + BITS_PER_WORD - 1) / BITS_PER_WORD;
+ this_slotno = parms->slotno + intoffset / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ if (intslots <= 0)
+ return;
+
+ /* If this is the trailing part of a word, only load that much into
+ the register. Otherwise load the whole register. Note that in
+     the latter case we may pick up unwanted bits.  It's not a problem
+     at the moment, but we may wish to revisit it.  */
+
+ if (intoffset % BITS_PER_WORD != 0)
+ {
+ mode = mode_for_size (BITS_PER_WORD - intoffset%BITS_PER_WORD,
+ MODE_INT, 0);
+ }
+ else
+ mode = word_mode;
+
+ intoffset /= BITS_PER_UNIT;
+ do
+ {
+ regno = parms->regbase + this_slotno;
+ reg = gen_rtx_REG (mode, regno);
+ XVECEXP (parms->ret, 0, parms->nregs)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
+
+ this_slotno += 1;
+ intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
+ parms->nregs += 1;
+ intslots -= 1;
+ }
+ while (intslots > 0);
+}
+
+static void
+function_arg_record_value_2 (type, startbitpos, parms)
+ tree type;
+ int startbitpos;
+ struct function_arg_record_value_parms *parms;
+{
+ tree field;
+ int packed_p = 0;
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL && DECL_PACKED (field))
+ {
+ packed_p = 1;
+ break;
+ }
+ }
+
+ for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
+ {
+ if (TREE_CODE (field) == FIELD_DECL)
+ {
+ int bitpos = startbitpos;
+ if (DECL_FIELD_BITPOS (field))
+ bitpos += TREE_INT_CST_LOW (DECL_FIELD_BITPOS (field));
+ /* ??? FIXME: else assume zero offset. */
+
+ if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE)
+ {
+ function_arg_record_value_2 (TREE_TYPE (field), bitpos, parms);
+ }
+ else if (TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
+ && TARGET_FPU
+ && ! packed_p
+ && parms->named)
+ {
+ int this_slotno = parms->slotno + bitpos / BITS_PER_WORD;
+ rtx reg;
+
+ function_arg_record_value_3 (bitpos, parms);
+
+ reg = gen_rtx_REG (DECL_MODE (field),
+ (SPARC_FP_ARG_FIRST + this_slotno * 2
+ + (DECL_MODE (field) == SFmode
+ && (bitpos & 32) != 0)));
+ XVECEXP (parms->ret, 0, parms->nregs)
+ = gen_rtx_EXPR_LIST (VOIDmode, reg,
+ GEN_INT (bitpos / BITS_PER_UNIT));
+ parms->nregs += 1;
+ }
+ else
+ {
+ if (parms->intoffset == -1)
+ parms->intoffset = bitpos;
+ }
+ }
+ }
+}
+
+static rtx
+function_arg_record_value (type, mode, slotno, named, regbase)
+ tree type;
+ enum machine_mode mode;
+ int slotno, named, regbase;
+{
+ HOST_WIDE_INT typesize = int_size_in_bytes (type);
+ struct function_arg_record_value_parms parms;
+ int nregs;
+
+ parms.ret = NULL_RTX;
+ parms.slotno = slotno;
+ parms.named = named;
+ parms.regbase = regbase;
+
+ /* Compute how many registers we need. */
+ parms.nregs = 0;
+ parms.intoffset = 0;
+ function_arg_record_value_1 (type, 0, &parms);
+
+ if (parms.intoffset != -1)
+ {
+ int intslots, this_slotno;
+
+ intslots = (typesize*BITS_PER_UNIT - parms.intoffset + BITS_PER_WORD - 1)
+ / BITS_PER_WORD;
+ this_slotno = slotno + parms.intoffset / BITS_PER_WORD;
+
+ intslots = MIN (intslots, SPARC_INT_ARG_MAX - this_slotno);
+ intslots = MAX (intslots, 0);
+
+ parms.nregs += intslots;
+ }
+ nregs = parms.nregs;
+
+ /* Allocate the vector and handle some annoying special cases. */
+ if (nregs == 0)
+ {
+ /* ??? Empty structure has no value? Duh? */
+ if (typesize <= 0)
+ {
+ /* Though there's nothing really to store, return a word register
+ anyway so the rest of gcc doesn't go nuts. Returning a PARALLEL
+ leads to breakage due to the fact that there are zero bytes to
+ load. */
+ return gen_rtx_REG (mode, regbase);
+ }
+ else
+ {
+ /* ??? C++ has structures with no fields, and yet a size. Give up
+ for now and pass everything back in integer registers. */
+ nregs = (typesize + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+ }
+ if (nregs + slotno > SPARC_INT_ARG_MAX)
+ nregs = SPARC_INT_ARG_MAX - slotno;
+ }
+ if (nregs == 0)
+    abort ();
+
+ parms.ret = gen_rtx_PARALLEL (mode, rtvec_alloc (nregs));
+
+ /* Fill in the entries. */
+ parms.nregs = 0;
+ parms.intoffset = 0;
+ function_arg_record_value_2 (type, 0, &parms);
+ function_arg_record_value_3 (typesize * BITS_PER_UNIT, &parms);
+
+ if (parms.nregs != nregs)
+ abort ();
+
+ return parms.ret;
+}
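+
+/* Editorial example (not part of the original import): for a named v9
+   argument of type struct { int a; double b; } starting at slot 0, the
+   counting pass above finds one int slot and one fp register, and the
+   PARALLEL returned carries a at offset 0 in the first int arg register
+   (%o0 for an outgoing call) and b at offset 8 in %d2.  */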
+
+/* Handle the FUNCTION_ARG macro.
+ Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis).
+ INCOMING_P is zero for FUNCTION_ARG, nonzero for FUNCTION_INCOMING_ARG. */
+
+rtx
+function_arg (cum, mode, type, named, incoming_p)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+ int incoming_p;
+{
+ int regbase = (incoming_p
+ ? SPARC_INCOMING_INT_ARG_FIRST
+ : SPARC_OUTGOING_INT_ARG_FIRST);
+ int slotno, regno, padding;
+ rtx reg;
+
+ slotno = function_arg_slotno (cum, mode, type, named, incoming_p,
+ &regno, &padding);
+
+ if (slotno == -1)
+ return 0;
+
+ if (TARGET_ARCH32)
+ {
+ reg = gen_rtx_REG (mode, regno);
+ return reg;
+ }
+
+ /* v9 fp args in reg slots beyond the int reg slots get passed in regs
+ but also have the slot allocated for them.
+ If no prototype is in scope fp values in register slots get passed
+ in two places, either fp regs and int regs or fp regs and memory. */
+ if ((GET_MODE_CLASS (mode) == MODE_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ && SPARC_FP_REG_P (regno))
+ {
+ reg = gen_rtx_REG (mode, regno);
+ if (cum->prototype_p || cum->libcall_p)
+ {
+ /* "* 2" because fp reg numbers are recorded in 4 byte
+ quantities. */
+#if 0
+ /* ??? This will cause the value to be passed in the fp reg and
+ in the stack. When a prototype exists we want to pass the
+ value in the reg but reserve space on the stack. That's an
+ optimization, and is deferred [for a bit]. */
+ if ((regno - SPARC_FP_ARG_FIRST) >= SPARC_INT_ARG_MAX * 2)
+ return gen_rtx_PARALLEL (mode,
+ gen_rtvec (2,
+ gen_rtx_EXPR_LIST (VOIDmode,
+ NULL_RTX, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode,
+ reg, const0_rtx)));
+ else
+#else
+ /* ??? It seems that passing back a register even when past
+ the area declared by REG_PARM_STACK_SPACE will allocate
+ space appropriately, and will not copy the data onto the
+ stack, exactly as we desire.
+
+ This is due to locate_and_pad_parm being called in
+	     expand_call whenever reg_parm_stack_space > 0, which,
+	     while beneficial to our example here, would seem to be
+ in error from what had been intended. Ho hum... -- r~ */
+#endif
+ return reg;
+ }
+ else
+ {
+ rtx v0, v1;
+
+ if ((regno - SPARC_FP_ARG_FIRST) < SPARC_INT_ARG_MAX * 2)
+ {
+ int intreg;
+
+ /* On incoming, we don't need to know that the value
+ is passed in %f0 and %i0, and it confuses other parts
+ causing needless spillage even on the simplest cases. */
+ if (incoming_p)
+ return reg;
+
+ intreg = (SPARC_OUTGOING_INT_ARG_FIRST
+ + (regno - SPARC_FP_ARG_FIRST) / 2);
+
+ v0 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+ v1 = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, intreg),
+ const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
+ }
+ else
+ {
+ v0 = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
+ v1 = gen_rtx_EXPR_LIST (VOIDmode, reg, const0_rtx);
+ return gen_rtx_PARALLEL (mode, gen_rtvec (2, v0, v1));
+ }
+ }
+ }
+ else if (type && TREE_CODE (type) == RECORD_TYPE)
+ {
+ /* Structures up to 16 bytes in size are passed in arg slots on the
+ stack and are promoted to registers where possible. */
+
+ if (int_size_in_bytes (type) > 16)
+ abort (); /* shouldn't get here */
+
+ return function_arg_record_value (type, mode, slotno, named, regbase);
+ }
+ else if (type && TREE_CODE (type) == UNION_TYPE)
+ {
+ enum machine_mode mode;
+ int bytes = int_size_in_bytes (type);
+
+ if (bytes > 16)
+ abort ();
+
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
+ reg = gen_rtx_REG (mode, regno);
+ }
+ else
+ {
+ /* Scalar or complex int. */
+ reg = gen_rtx_REG (mode, regno);
+ }
+
+ return reg;
+}
+
+/* Handle the FUNCTION_ARG_PARTIAL_NREGS macro.
+ For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero.
+
+ Any arg that starts in the first 6 regs but won't entirely fit in them
+ needs partial registers on v8. On v9, structures with integer
+   values in arg slots 5 and 6 will be passed in %o5 and SP+176, and complex fp
+ values that begin in the last fp reg [where "last fp reg" varies with the
+ mode] will be split between that reg and memory. */
+
+int
+function_arg_partial_nregs (cum, mode, type, named)
+ const CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int slotno, regno, padding;
+
+  /* We pass 0 for incoming_p here; it doesn't matter.  */
+ slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
+
+ if (slotno == -1)
+ return 0;
+
+ if (TARGET_ARCH32)
+ {
+ if ((slotno + (mode == BLKmode
+ ? ROUND_ADVANCE (int_size_in_bytes (type))
+ : ROUND_ADVANCE (GET_MODE_SIZE (mode))))
+ > NPARM_REGS (SImode))
+ return NPARM_REGS (SImode) - slotno;
+ return 0;
+ }
+ else
+ {
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ int size = int_size_in_bytes (type);
+ int align = TYPE_ALIGN (type);
+
+ if (align == 16)
+ slotno += slotno & 1;
+ if (size > 8 && size <= 16
+ && slotno == SPARC_INT_ARG_MAX - 1)
+ return 1;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT
+ || (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
+ && ! TARGET_FPU))
+ {
+ if (GET_MODE_ALIGNMENT (mode) == 128)
+ {
+ slotno += slotno & 1;
+ if (slotno == SPARC_INT_ARG_MAX - 2)
+ return 1;
+ }
+ else
+ {
+ if (slotno == SPARC_INT_ARG_MAX - 1)
+ return 1;
+ }
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ if (GET_MODE_ALIGNMENT (mode) == 128)
+ slotno += slotno & 1;
+ if ((slotno + GET_MODE_SIZE (mode) / UNITS_PER_WORD)
+ > SPARC_FP_ARG_MAX)
+ return 1;
+ }
+ return 0;
+ }
+}
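+
+/* Editorial example (not part of the original import): on v8, a DImode
+   argument assigned to slot 5 has 5 + ROUND_ADVANCE (8) == 7, which
+   exceeds NPARM_REGS (SImode) == 6, so the function returns 1: one word
+   goes in %o5 and the rest is passed on the stack.  */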
+
+/* Handle the FUNCTION_ARG_PASS_BY_REFERENCE macro.
+ !v9: The SPARC ABI stipulates passing struct arguments (of any size) and
+ quad-precision floats by invisible reference.
+ v9: Aggregates greater than 16 bytes are passed by reference.
+ For Pascal, also pass arrays by reference. */
+
+int
+function_arg_pass_by_reference (cum, mode, type, named)
+ const CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED;
+ enum machine_mode mode;
+ tree type;
+ int named ATTRIBUTE_UNUSED;
+{
+ if (TARGET_ARCH32)
+ {
+ return ((type && AGGREGATE_TYPE_P (type))
+ || mode == TFmode || mode == TCmode);
+ }
+ else
+ {
+ return ((type && TREE_CODE (type) == ARRAY_TYPE)
+	    /* Complex values count as aggregates, so this also covers TCmode.  */
+ || GET_MODE_SIZE (mode) > 16
+ || (type && AGGREGATE_TYPE_P (type)
+ && int_size_in_bytes (type) > 16));
+ }
+}
+
+/* Handle the FUNCTION_ARG_ADVANCE macro.
+ Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ TYPE is null for libcalls where that information may not be available. */
+
+void
+function_arg_advance (cum, mode, type, named)
+ CUMULATIVE_ARGS *cum;
+ enum machine_mode mode;
+ tree type;
+ int named;
+{
+ int slotno, regno, padding;
+
+  /* We pass 0 for incoming_p here; it doesn't matter.  */
+ slotno = function_arg_slotno (cum, mode, type, named, 0, &regno, &padding);
+
+  /* If the argument required leading padding, add it.  */
+ if (slotno != -1)
+ cum->words += padding;
+
+ if (TARGET_ARCH32)
+ {
+ cum->words += (mode != BLKmode
+ ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
+ : ROUND_ADVANCE (int_size_in_bytes (type)));
+ }
+ else
+ {
+ if (type && AGGREGATE_TYPE_P (type))
+ {
+ int size = int_size_in_bytes (type);
+
+ if (size <= 8)
+ ++cum->words;
+ else if (size <= 16)
+ cum->words += 2;
+ else /* passed by reference */
+ ++cum->words;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_INT)
+ {
+ cum->words += 2;
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
+ {
+ cum->words += GET_MODE_SIZE (mode) / UNITS_PER_WORD;
+ }
+ else
+ {
+ cum->words += (mode != BLKmode
+ ? ROUND_ADVANCE (GET_MODE_SIZE (mode))
+ : ROUND_ADVANCE (int_size_in_bytes (type)));
+ }
+ }
+}
+
+/* Handle the FUNCTION_ARG_PADDING macro.
+ For the 64 bit ABI structs are always stored left shifted in their
+ argument slot. */
+
+enum direction
+function_arg_padding (mode, type)
+ enum machine_mode mode;
+ tree type;
+{
+ if (TARGET_ARCH64 && type != 0 && AGGREGATE_TYPE_P (type))
+ return upward;
+
+ /* This is the default definition. */
+ return (! BYTES_BIG_ENDIAN
+ ? upward
+ : ((mode == BLKmode
+ ? (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
+ && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT))
+ : GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
+ ? downward : upward));
+}
+
+/* Handle FUNCTION_VALUE, FUNCTION_OUTGOING_VALUE, and LIBCALL_VALUE macros.
+ For v9, function return values are subject to the same rules as arguments,
+   except that up to 32 bytes may be returned in registers.  */
+
+rtx
+function_value (type, mode, incoming_p)
+ tree type;
+ enum machine_mode mode;
+ int incoming_p;
+{
+ int regno;
+ int regbase = (incoming_p
+ ? SPARC_OUTGOING_INT_ARG_FIRST
+ : SPARC_INCOMING_INT_ARG_FIRST);
+
+ if (TARGET_ARCH64 && type)
+ {
+ if (TREE_CODE (type) == RECORD_TYPE)
+ {
+ /* Structures up to 32 bytes in size are passed in registers,
+ promoted to fp registers where possible. */
+
+ if (int_size_in_bytes (type) > 32)
+ abort (); /* shouldn't get here */
+
+ return function_arg_record_value (type, mode, 0, 1, regbase);
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ {
+ int bytes = int_size_in_bytes (type);
+
+ if (bytes > 32)
+ abort ();
+
+ mode = mode_for_size (bytes * BITS_PER_UNIT, MODE_INT, 0);
+ }
+ }
+
+ if (incoming_p)
+ regno = BASE_RETURN_VALUE_REG (mode);
+ else
+ regno = BASE_OUTGOING_VALUE_REG (mode);
+
+ return gen_rtx_REG (mode, regno);
+}
+
+/* Do what is necessary for `va_start'. The argument is ignored.
+
+ We look at the current function to determine if stdarg or varargs
+ is used and return the address of the first unnamed parameter. */
+
+rtx
+sparc_builtin_saveregs (arglist)
+ tree arglist ATTRIBUTE_UNUSED;
+{
+ int first_reg = current_function_args_info.words;
+ rtx address;
+ int regno;
+
+ for (regno = first_reg; regno < NPARM_REGS (word_mode); regno++)
+ emit_move_insn (gen_rtx_MEM (word_mode,
+ gen_rtx_PLUS (Pmode,
+ frame_pointer_rtx,
+ GEN_INT (STACK_POINTER_OFFSET
+ + UNITS_PER_WORD * regno))),
+ gen_rtx_REG (word_mode,
+ BASE_INCOMING_ARG_REG (word_mode) + regno));
+
+ address = gen_rtx_PLUS (Pmode,
+ frame_pointer_rtx,
+ GEN_INT (STACK_POINTER_OFFSET
+ + UNITS_PER_WORD * first_reg));
+
+ if (flag_check_memory_usage
+ && first_reg < NPARM_REGS (word_mode))
+ emit_library_call (chkr_set_right_libfunc, 1, VOIDmode, 3,
+ address, ptr_mode,
+ GEN_INT (UNITS_PER_WORD
+ * (NPARM_REGS (word_mode) - first_reg)),
+ TYPE_MODE (sizetype), GEN_INT (MEMORY_USE_RW),
+ TYPE_MODE (integer_type_node));
+
+ return address;
+}
+
+/* Return the string to output a conditional branch to LABEL, which is
+ the operand number of the label. OP is the conditional expression.
+ XEXP (OP, 0) is assumed to be a condition code register (integer or
+ floating point) and its mode specifies what kind of comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ ANNUL is non-zero if we should generate an annulling branch.
+
+ NOOP is non-zero if we have to follow this branch by a noop.
+
+ INSN, if set, is the insn. */
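+
+/* For example, given (ne (reg:CC %icc) 0) with LABEL == 2 and ANNUL set,
+   the code below builds the template "bne,a %%icc,%l2" on v9, or simply
+   "bne,a %l2" on v8.  */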
+
+char *
+output_cbranch (op, label, reversed, annul, noop, insn)
+ rtx op;
+ int label;
+ int reversed, annul, noop;
+ rtx insn;
+{
+  static char string[32];
+ enum rtx_code code = GET_CODE (op);
+ rtx cc_reg = XEXP (op, 0);
+ enum machine_mode mode = GET_MODE (cc_reg);
+ static char v8_labelno[] = " %lX";
+ static char v9_icc_labelno[] = " %%icc,%lX";
+ static char v9_xcc_labelno[] = " %%xcc,%lX";
+ static char v9_fcc_labelno[] = " %%fccX,%lY";
+ char *labelno;
+ int labeloff;
+
+ /* ??? !v9: FP branches cannot be preceded by another floating point insn.
+ Because there is currently no concept of pre-delay slots, we can fix
+ this only by always emitting a nop before a floating point branch. */
+
+ if ((mode == CCFPmode || mode == CCFPEmode) && ! TARGET_V9)
+ strcpy (string, "nop\n\t");
+ else
+ string[0] = '\0';
+
+ /* If not floating-point or if EQ or NE, we can just reverse the code. */
+ if (reversed
+ && ((mode != CCFPmode && mode != CCFPEmode) || code == EQ || code == NE))
+ code = reverse_condition (code), reversed = 0;
+
+ /* Start by writing the branch condition. */
+ switch (code)
+ {
+ case NE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ strcat (string, "fbne");
+ else
+ strcpy (string, "bne");
+ break;
+
+ case EQ:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ strcat (string, "fbe");
+ else
+ strcpy (string, "be");
+ break;
+
+ case GE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbul");
+ else
+ strcat (string, "fbge");
+ }
+ else if (mode == CC_NOOVmode)
+ strcpy (string, "bpos");
+ else
+ strcpy (string, "bge");
+ break;
+
+ case GT:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbule");
+ else
+ strcat (string, "fbg");
+ }
+ else
+ strcpy (string, "bg");
+ break;
+
+ case LE:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbug");
+ else
+ strcat (string, "fble");
+ }
+ else
+ strcpy (string, "ble");
+ break;
+
+ case LT:
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ if (reversed)
+ strcat (string, "fbuge");
+ else
+ strcat (string, "fbl");
+ }
+ else if (mode == CC_NOOVmode)
+ strcpy (string, "bneg");
+ else
+ strcpy (string, "bl");
+ break;
+
+ case GEU:
+ strcpy (string, "bgeu");
+ break;
+
+ case GTU:
+ strcpy (string, "bgu");
+ break;
+
+ case LEU:
+ strcpy (string, "bleu");
+ break;
+
+ case LTU:
+ strcpy (string, "blu");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Now add the annulling, the label, and a possible noop. */
+ if (annul)
+ strcat (string, ",a");
+
+ if (! TARGET_V9)
+ {
+ labeloff = 3;
+ labelno = v8_labelno;
+ }
+ else
+ {
+ rtx note;
+
+ if (insn && (note = find_reg_note (insn, REG_BR_PRED, NULL_RTX)))
+ strcat (string, INTVAL (XEXP (note, 0)) & ATTR_FLAG_likely ? ",pt" : ",pn");
+
+ labeloff = 9;
+ if (mode == CCFPmode || mode == CCFPEmode)
+ {
+ labeloff = 10;
+ labelno = v9_fcc_labelno;
+ /* Set the char indicating the number of the fcc reg to use. */
+ labelno[6] = REGNO (cc_reg) - SPARC_FIRST_V9_FCC_REG + '0';
+ }
+ else if (mode == CCXmode || mode == CCX_NOOVmode)
+ labelno = v9_xcc_labelno;
+ else
+ labelno = v9_icc_labelno;
+ }
+ /* Set the char indicating the number of the operand containing the
+ label_ref. */
+ labelno[labeloff] = label + '0';
+ strcat (string, labelno);
+
+ if (noop)
+ strcat (string, "\n\tnop");
+
+ return string;
+}
+
+/* Return the string to output a conditional branch to LABEL, testing
+ register REG. LABEL is the operand number of the label; REG is the
+ operand number of the reg. OP is the conditional expression. The mode
+ of REG says what kind of comparison we made.
+
+ REVERSED is non-zero if we should reverse the sense of the comparison.
+
+ ANNUL is non-zero if we should generate an annulling branch.
+
+ NOOP is non-zero if we have to follow this branch by a noop. */
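+
+/* Example: for (ne (reg:DI %o0) 0) with REG == 1 and LABEL == 0, the
+   template built below is "brnz,a %1,%l0" when ANNUL is set.  */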
+
+char *
+output_v9branch (op, reg, label, reversed, annul, noop)
+ rtx op;
+ int reg, label;
+ int reversed, annul, noop;
+{
+ static char string[20];
+ enum rtx_code code = GET_CODE (op);
+ enum machine_mode mode = GET_MODE (XEXP (op, 0));
+ static char labelno[] = " %X,%lX";
+
+ /* If not floating-point or if EQ or NE, we can just reverse the code. */
+ if (reversed)
+ code = reverse_condition (code), reversed = 0;
+
+ /* Only 64 bit versions of these instructions exist. */
+ if (mode != DImode)
+ abort ();
+
+ /* Start by writing the branch condition. */
+
+ switch (code)
+ {
+ case NE:
+ strcpy (string, "brnz");
+ break;
+
+ case EQ:
+ strcpy (string, "brz");
+ break;
+
+ case GE:
+ strcpy (string, "brgez");
+ break;
+
+ case LT:
+ strcpy (string, "brlz");
+ break;
+
+ case LE:
+ strcpy (string, "brlez");
+ break;
+
+ case GT:
+ strcpy (string, "brgz");
+ break;
+
+ default:
+ abort ();
+ }
+
+ /* Now add the annulling, reg, label, and nop. */
+ if (annul)
+ strcat (string, ",a");
+
+ /* ??? Optional prediction bit ",pt" or ",pf" goes here. */
+
+ labelno[2] = reg + '0';
+ labelno[6] = label + '0';
+ strcat (string, labelno);
+
+ if (noop)
+ strcat (string, "\n\tnop");
+
+ return string;
+}
+
+/* Renumber registers in delay slot. Replace registers instead of
+ renumbering because they may be shared.
+
+ This does not handle instructions other than move. */
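+
+/* E.g. a move referencing %i0 (regno 24) is rewritten to use %o0
+   (regno 8), matching the window shift performed by the RETURN whose
+   delay slot holds the move.  */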
+
+static void
+epilogue_renumber (where)
+ rtx *where;
+{
+ rtx x = *where;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case MEM:
+ *where = x = copy_rtx (x);
+ epilogue_renumber (&XEXP (x, 0));
+ return;
+
+ case REG:
+ {
+ int regno = REGNO (x);
+ if (regno > 8 && regno < 24)
+ abort ();
+ if (regno >= 24 && regno < 32)
+ *where = gen_rtx_REG (GET_MODE (x), regno - 16);
+ return;
+ }
+ case CONST_INT:
+ case CONST_DOUBLE:
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return;
+
+ case IOR:
+ case AND:
+ case XOR:
+ case PLUS:
+ case MINUS:
+ epilogue_renumber (&XEXP (x, 1));
+ case NEG:
+ case NOT:
+ epilogue_renumber (&XEXP (x, 0));
+ return;
+
+ default:
+ debug_rtx (*where);
+ abort();
+ }
+}
+
+/* Output assembler code to return from a function. */
+
+char *
+output_return (operands)
+ rtx *operands;
+{
+ rtx delay = final_sequence ? XVECEXP (final_sequence, 0, 1) : 0;
+
+ if (leaf_label)
+ {
+ operands[0] = leaf_label;
+ return "b%* %l0%(";
+ }
+ else if (leaf_function)
+ {
+ /* No delay slot in a leaf function. */
+ if (delay)
+ abort ();
+
+ /* If we didn't allocate a frame pointer for the current function,
+ the stack pointer might have been adjusted. Output code to
+ restore it now. */
+
+ operands[0] = GEN_INT (actual_fsize);
+
+ /* Use sub of negated value in first two cases instead of add to
+ allow actual_fsize == 4096. */
+
+ if (actual_fsize <= 4096)
+ {
+ if (SKIP_CALLERS_UNIMP_P)
+ return "jmp %%o7+12\n\tsub %%sp,-%0,%%sp";
+ else
+ return "retl\n\tsub %%sp,-%0,%%sp";
+ }
+ else if (actual_fsize <= 8192)
+ {
+ operands[0] = GEN_INT (actual_fsize - 4096);
+ if (SKIP_CALLERS_UNIMP_P)
+ return "sub %%sp,-4096,%%sp\n\tjmp %%o7+12\n\tsub %%sp,-%0,%%sp";
+ else
+ return "sub %%sp,-4096,%%sp\n\tretl\n\tsub %%sp,-%0,%%sp";
+ }
+ else if (SKIP_CALLERS_UNIMP_P)
+ {
+ if ((actual_fsize & 0x3ff) != 0)
+ return "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tjmp %%o7+12\n\tadd %%sp,%%g1,%%sp";
+ else
+ return "sethi %%hi(%a0),%%g1\n\tjmp %%o7+12\n\tadd %%sp,%%g1,%%sp";
+ }
+ else
+ {
+ if ((actual_fsize & 0x3ff) != 0)
+ return "sethi %%hi(%a0),%%g1\n\tor %%g1,%%lo(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
+ else
+ return "sethi %%hi(%a0),%%g1\n\tretl\n\tadd %%sp,%%g1,%%sp";
+ }
+ }
+ else if (TARGET_V9)
+ {
+ if (delay)
+ {
+ epilogue_renumber (&SET_DEST (PATTERN (delay)));
+ epilogue_renumber (&SET_SRC (PATTERN (delay)));
+ }
+ if (SKIP_CALLERS_UNIMP_P)
+ return "return %%i7+12%#";
+ else
+ return "return %%i7+8%#";
+ }
+ else
+ {
+ if (delay)
+ abort ();
+ if (SKIP_CALLERS_UNIMP_P)
+ return "jmp %%i7+12\n\trestore";
+ else
+ return "ret\n\trestore";
+ }
+}
+
+/* Leaf functions and non-leaf functions have different needs. */
+
+static int
+reg_leaf_alloc_order[] = REG_LEAF_ALLOC_ORDER;
+
+static int
+reg_nonleaf_alloc_order[] = REG_ALLOC_ORDER;
+
+static int *reg_alloc_orders[] = {
+ reg_leaf_alloc_order,
+ reg_nonleaf_alloc_order};
+
+void
+order_regs_for_local_alloc ()
+{
+ static int last_order_nonleaf = 1;
+
+ if (regs_ever_live[15] != last_order_nonleaf)
+ {
+ last_order_nonleaf = !last_order_nonleaf;
+ bcopy ((char *) reg_alloc_orders[last_order_nonleaf],
+ (char *) reg_alloc_order, FIRST_PSEUDO_REGISTER * sizeof (int));
+ }
+}
+
+/* Return 1 if REGNO (reg1) is even and REGNO (reg1) == REGNO (reg2) - 1.
+ This makes them candidates for using ldd and std insns.
+
+ Note reg1 and reg2 *must* be hard registers. */
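+
+/* For instance, %o0/%o1 (regnos 8 and 9) qualify on v8, while %o1/%o2
+   fail the even-number test and %l0/%l2 fail the adjacency test.  */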
+
+int
+registers_ok_for_ldd_peep (reg1, reg2)
+ rtx reg1, reg2;
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
+ return 0;
+
+ if (REGNO (reg1) % 2 != 0)
+ return 0;
+
+  /* Integer ldd is deprecated in SPARC V9.  */
+ if (TARGET_V9 && REGNO (reg1) < 32)
+ return 0;
+
+ return (REGNO (reg1) == REGNO (reg2) - 1);
+}
+
+/* Return 1 if addr1 and addr2 are suitable for use in an ldd or
+ std insn.
+
+ This can only happen when addr1 and addr2 are consecutive memory
+ locations (addr1 + 4 == addr2). addr1 must also be aligned on a
+ 64 bit boundary (addr1 % 8 == 0).
+
+ We know %sp and %fp are kept aligned on a 64 bit boundary. Other
+ registers are assumed to *never* be properly aligned and are
+ rejected.
+
+ Knowing %sp and %fp are kept aligned on a 64 bit boundary, we
+ need only check that the offset for addr1 % 8 == 0. */
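+
+/* Example: [%fp-16] and [%fp-12] qualify (-16 % 8 == 0 and the offsets
+   differ by 4), whereas [%fp-12] and [%fp-8] are rejected because the
+   first address is not doubleword aligned.  */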
+
+int
+addrs_ok_for_ldd_peep (addr1, addr2)
+ rtx addr1, addr2;
+{
+ int reg1, offset1;
+
+ /* Extract a register number and offset (if used) from the first addr. */
+ if (GET_CODE (addr1) == PLUS)
+ {
+ /* If not a REG, return zero. */
+ if (GET_CODE (XEXP (addr1, 0)) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (XEXP (addr1, 0));
+ /* The offset must be constant! */
+ if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
+ return 0;
+ offset1 = INTVAL (XEXP (addr1, 1));
+ }
+ }
+ else if (GET_CODE (addr1) != REG)
+ return 0;
+ else
+ {
+ reg1 = REGNO (addr1);
+ /* This was a simple (mem (reg)) expression. Offset is 0. */
+ offset1 = 0;
+ }
+
+  /* Make sure the second address is a (mem (plus (reg) (const_int))).  */
+ if (GET_CODE (addr2) != PLUS)
+ return 0;
+
+ if (GET_CODE (XEXP (addr2, 0)) != REG
+ || GET_CODE (XEXP (addr2, 1)) != CONST_INT)
+ return 0;
+
+ /* Only %fp and %sp are allowed. Additionally both addresses must
+ use the same register. */
+ if (reg1 != FRAME_POINTER_REGNUM && reg1 != STACK_POINTER_REGNUM)
+ return 0;
+
+ if (reg1 != REGNO (XEXP (addr2, 0)))
+ return 0;
+
+ /* The first offset must be evenly divisible by 8 to ensure the
+ address is 64 bit aligned. */
+ if (offset1 % 8 != 0)
+ return 0;
+
+ /* The offset for the second addr must be 4 more than the first addr. */
+ if (INTVAL (XEXP (addr2, 1)) != offset1 + 4)
+ return 0;
+
+ /* All the tests passed. addr1 and addr2 are valid for ldd and std
+ instructions. */
+ return 1;
+}
+
+/* Return 1 if reg is a pseudo, or is the first register in
+ a hard register pair. This makes it a candidate for use in
+ ldd and std insns. */
+
+int
+register_ok_for_ldd (reg)
+ rtx reg;
+{
+ /* We might have been passed a SUBREG. */
+ if (GET_CODE (reg) != REG)
+ return 0;
+
+ if (REGNO (reg) < FIRST_PSEUDO_REGISTER)
+ return (REGNO (reg) % 2 == 0);
+ else
+ return 1;
+}
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
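+
+/* For a DImode operand 0 held in %o0/%o1 (big-endian word order), %H0
+   prints "%o0" while %L0 and %R0 both print "%o1".  */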
+
+void
+print_operand (file, x, code)
+ FILE *file;
+ rtx x;
+ int code;
+{
+ switch (code)
+ {
+ case '#':
+ /* Output a 'nop' if there's nothing for the delay slot. */
+ if (dbr_sequence_length () == 0)
+ fputs ("\n\tnop", file);
+ return;
+ case '*':
+ /* Output an annul flag if there's nothing for the delay slot and we
+ are optimizing. This is always used with '(' below. */
+ /* Sun OS 4.1.1 dbx can't handle an annulled unconditional branch;
+ this is a dbx bug. So, we only do this when optimizing. */
+ /* On UltraSPARC, a branch in a delay slot causes a pipeline flush.
+ Always emit a nop in case the next instruction is a branch. */
+ if (dbr_sequence_length () == 0
+ && (optimize && (int)sparc_cpu < PROCESSOR_V9))
+ fputs (",a", file);
+ return;
+ case '(':
+ /* Output a 'nop' if there's nothing for the delay slot and we are
+ not optimizing. This is always used with '*' above. */
+ if (dbr_sequence_length () == 0
+ && ! (optimize && (int)sparc_cpu < PROCESSOR_V9))
+ fputs ("\n\tnop", file);
+ return;
+ case '_':
+ /* Output the Embedded Medium/Anywhere code model base register. */
+ fputs (EMBMEDANY_BASE_REG, file);
+ return;
+ case '@':
+ /* Print out what we are using as the frame pointer. This might
+ be %fp, or might be %sp+offset. */
+ /* ??? What if offset is too big? Perhaps the caller knows it isn't? */
+ fprintf (file, "%s+%d", frame_base_name, frame_base_offset);
+ return;
+ case 'Y':
+ /* Adjust the operand to take into account a RESTORE operation. */
+ if (GET_CODE (x) == CONST_INT)
+ break;
+ else if (GET_CODE (x) != REG)
+ output_operand_lossage ("Invalid %%Y operand");
+ else if (REGNO (x) < 8)
+ fputs (reg_names[REGNO (x)], file);
+ else if (REGNO (x) >= 24 && REGNO (x) < 32)
+ fputs (reg_names[REGNO (x)-16], file);
+ else
+ output_operand_lossage ("Invalid %%Y operand");
+ return;
+ case 'L':
+ /* Print out the low order register name of a register pair. */
+ if (WORDS_BIG_ENDIAN)
+ fputs (reg_names[REGNO (x)+1], file);
+ else
+ fputs (reg_names[REGNO (x)], file);
+ return;
+ case 'H':
+ /* Print out the high order register name of a register pair. */
+ if (WORDS_BIG_ENDIAN)
+ fputs (reg_names[REGNO (x)], file);
+ else
+ fputs (reg_names[REGNO (x)+1], file);
+ return;
+ case 'R':
+ /* Print out the second register name of a register pair or quad.
+ I.e., R (%o0) => %o1. */
+ fputs (reg_names[REGNO (x)+1], file);
+ return;
+ case 'S':
+ /* Print out the third register name of a register quad.
+ I.e., S (%o0) => %o2. */
+ fputs (reg_names[REGNO (x)+2], file);
+ return;
+ case 'T':
+ /* Print out the fourth register name of a register quad.
+ I.e., T (%o0) => %o3. */
+ fputs (reg_names[REGNO (x)+3], file);
+ return;
+ case 'x':
+ /* Print a condition code register. */
+ if (REGNO (x) == SPARC_ICC_REG)
+ {
+ /* We don't handle CC[X]_NOOVmode because they're not supposed
+ to occur here. */
+ if (GET_MODE (x) == CCmode)
+ fputs ("%icc", file);
+ else if (GET_MODE (x) == CCXmode)
+ fputs ("%xcc", file);
+ else
+ abort ();
+ }
+ else
+ /* %fccN register */
+ fputs (reg_names[REGNO (x)], file);
+ return;
+ case 'm':
+ /* Print the operand's address only. */
+ output_address (XEXP (x, 0));
+ return;
+ case 'r':
+ /* In this case we need a register. Use %g0 if the
+ operand is const0_rtx. */
+ if (x == const0_rtx
+ || (GET_MODE (x) != VOIDmode && x == CONST0_RTX (GET_MODE (x))))
+ {
+ fputs ("%g0", file);
+ return;
+ }
+ else
+ break;
+
+ case 'A':
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("or", file); break;
+ case AND: fputs ("and", file); break;
+ case XOR: fputs ("xor", file); break;
+ default: output_operand_lossage ("Invalid %%A operand");
+ }
+ return;
+
+ case 'B':
+ switch (GET_CODE (x))
+ {
+ case IOR: fputs ("orn", file); break;
+ case AND: fputs ("andn", file); break;
+ case XOR: fputs ("xnor", file); break;
+ default: output_operand_lossage ("Invalid %%B operand");
+ }
+ return;
+
+ /* These are used by the conditional move instructions. */
+ case 'c' :
+ case 'C':
+ {
+ enum rtx_code rc = (code == 'c'
+ ? reverse_condition (GET_CODE (x))
+ : GET_CODE (x));
+ switch (rc)
+ {
+ case NE: fputs ("ne", file); break;
+ case EQ: fputs ("e", file); break;
+ case GE: fputs ("ge", file); break;
+ case GT: fputs ("g", file); break;
+ case LE: fputs ("le", file); break;
+ case LT: fputs ("l", file); break;
+ case GEU: fputs ("geu", file); break;
+ case GTU: fputs ("gu", file); break;
+ case LEU: fputs ("leu", file); break;
+ case LTU: fputs ("lu", file); break;
+ default: output_operand_lossage (code == 'c'
+ ? "Invalid %%c operand"
+ : "Invalid %%C operand");
+ }
+ return;
+ }
+
+ /* These are used by the movr instruction pattern. */
+ case 'd':
+ case 'D':
+ {
+ enum rtx_code rc = (code == 'd'
+ ? reverse_condition (GET_CODE (x))
+ : GET_CODE (x));
+ switch (rc)
+ {
+ case NE: fputs ("ne", file); break;
+ case EQ: fputs ("e", file); break;
+ case GE: fputs ("gez", file); break;
+ case LT: fputs ("lz", file); break;
+ case LE: fputs ("lez", file); break;
+ case GT: fputs ("gz", file); break;
+ default: output_operand_lossage (code == 'd'
+ ? "Invalid %%d operand"
+ : "Invalid %%D operand");
+ }
+ return;
+ }
+
+ case 'b':
+ {
+ /* Print a sign-extended character. */
+ int i = INTVAL (x) & 0xff;
+ if (i & 0x80)
+ i |= 0xffffff00;
+ fprintf (file, "%d", i);
+ return;
+ }
+
+ case 'f':
+ /* Operand must be a MEM; write its address. */
+ if (GET_CODE (x) != MEM)
+ output_operand_lossage ("Invalid %%f operand");
+ output_address (XEXP (x, 0));
+ return;
+
+ case 0:
+ /* Do nothing special. */
+ break;
+
+ default:
+ /* Undocumented flag. */
+ output_operand_lossage ("invalid operand output code");
+ }
+
+ if (GET_CODE (x) == REG)
+ fputs (reg_names[REGNO (x)], file);
+ else if (GET_CODE (x) == MEM)
+ {
+ fputc ('[', file);
+ /* Poor Sun assembler doesn't understand absolute addressing. */
+ if (CONSTANT_P (XEXP (x, 0))
+ && ! TARGET_LIVE_G0)
+ fputs ("%g0+", file);
+ output_address (XEXP (x, 0));
+ fputc (']', file);
+ }
+ else if (GET_CODE (x) == HIGH)
+ {
+ fputs ("%hi(", file);
+ output_addr_const (file, XEXP (x, 0));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == LO_SUM)
+ {
+ print_operand (file, XEXP (x, 0), 0);
+ fputs ("+%lo(", file);
+ output_addr_const (file, XEXP (x, 1));
+ fputc (')', file);
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE
+ && (GET_MODE (x) == VOIDmode
+ || GET_MODE_CLASS (GET_MODE (x)) == MODE_INT))
+ {
+ if (CONST_DOUBLE_HIGH (x) == 0)
+ fprintf (file, "%u", CONST_DOUBLE_LOW (x));
+ else if (CONST_DOUBLE_HIGH (x) == -1
+ && CONST_DOUBLE_LOW (x) < 0)
+ fprintf (file, "%d", CONST_DOUBLE_LOW (x));
+ else
+ output_operand_lossage ("long long constant not a valid immediate operand");
+ }
+ else if (GET_CODE (x) == CONST_DOUBLE)
+ output_operand_lossage ("floating point constant not a valid immediate operand");
+ else { output_addr_const (file, x); }
+}
+
+/* This function outputs assembler code for VALUE to FILE, where VALUE is
+ a 64 bit (DImode) value. */
+
+/* ??? If there is a 64 bit counterpart to .word that the assembler
+   understands, then using that would simplify this code greatly.  */
+/* ??? We only output .xword's for symbols and only then in environments
+ where the assembler can handle them. */
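+
+/* Example: for the CONST_INT 0x100000002 on a 64 bit host, this emits
+   two 32 bit words, 0x1 followed by 0x2 (most significant first).  */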
+
+void
+output_double_int (file, value)
+ FILE *file;
+ rtx value;
+{
+ if (GET_CODE (value) == CONST_INT)
+ {
+ /* ??? This has endianness issues. */
+#if HOST_BITS_PER_WIDE_INT == 64
+ HOST_WIDE_INT xword = INTVAL (value);
+ HOST_WIDE_INT high, low;
+
+ high = (xword >> 32) & 0xffffffff;
+ low = xword & 0xffffffff;
+ ASM_OUTPUT_INT (file, GEN_INT (high));
+ ASM_OUTPUT_INT (file, GEN_INT (low));
+#else
+ if (INTVAL (value) < 0)
+ ASM_OUTPUT_INT (file, constm1_rtx);
+ else
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+#endif
+ }
+ else if (GET_CODE (value) == CONST_DOUBLE)
+ {
+ ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_HIGH (value)));
+ ASM_OUTPUT_INT (file, GEN_INT (CONST_DOUBLE_LOW (value)));
+ }
+ else if (GET_CODE (value) == SYMBOL_REF
+ || GET_CODE (value) == CONST
+ || GET_CODE (value) == PLUS
+ || (TARGET_ARCH64 &&
+ (GET_CODE (value) == LABEL_REF
+ || GET_CODE (value) == CODE_LABEL
+ || GET_CODE (value) == MINUS)))
+ {
+ if (!TARGET_V9 || TARGET_CM_MEDLOW)
+ {
+ ASM_OUTPUT_INT (file, const0_rtx);
+ ASM_OUTPUT_INT (file, value);
+ }
+ else
+ {
+ fprintf (file, "\t%s\t", ASM_LONGLONG);
+ output_addr_const (file, value);
+ fprintf (file, "\n");
+ }
+ }
+ else
+ abort ();
+}
+
+/* Return the value of a code used in the .proc pseudo-op that says
+ what kind of result this function returns. For non-C types, we pick
+ the closest C type. */
+
+#ifndef CHAR_TYPE_SIZE
+#define CHAR_TYPE_SIZE BITS_PER_UNIT
+#endif
+
+#ifndef SHORT_TYPE_SIZE
+#define SHORT_TYPE_SIZE (BITS_PER_UNIT * 2)
+#endif
+
+#ifndef INT_TYPE_SIZE
+#define INT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_TYPE_SIZE
+#define LONG_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef LONG_LONG_TYPE_SIZE
+#define LONG_LONG_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef FLOAT_TYPE_SIZE
+#define FLOAT_TYPE_SIZE BITS_PER_WORD
+#endif
+
+#ifndef DOUBLE_TYPE_SIZE
+#define DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+#ifndef LONG_DOUBLE_TYPE_SIZE
+#define LONG_DOUBLE_TYPE_SIZE (BITS_PER_WORD * 2)
+#endif
+
+unsigned long
+sparc_type_code (type)
+ register tree type;
+{
+ register unsigned long qualifiers = 0;
+ register unsigned shift;
+
+ /* Only the first 30 bits of the qualifier are valid. We must refrain from
+ setting more, since some assemblers will give an error for this. Also,
+ we must be careful to avoid shifts of 32 bits or more to avoid getting
+ unpredictable results. */
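+
+  /* Worked example: for the C type "int **" the loop below accumulates
+     (1 << 6) | (1 << 8) for the two pointer levels and then returns
+     with code 4 for the signed int base type, giving 0x144.  */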
+
+ for (shift = 6; shift < 30; shift += 2, type = TREE_TYPE (type))
+ {
+ switch (TREE_CODE (type))
+ {
+ case ERROR_MARK:
+ return qualifiers;
+
+ case ARRAY_TYPE:
+ qualifiers |= (3 << shift);
+ break;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ qualifiers |= (2 << shift);
+ break;
+
+ case POINTER_TYPE:
+ case REFERENCE_TYPE:
+ case OFFSET_TYPE:
+ qualifiers |= (1 << shift);
+ break;
+
+ case RECORD_TYPE:
+ return (qualifiers | 8);
+
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE:
+ return (qualifiers | 9);
+
+ case ENUMERAL_TYPE:
+ return (qualifiers | 10);
+
+ case VOID_TYPE:
+ return (qualifiers | 16);
+
+ case INTEGER_TYPE:
+ /* If this is a range type, consider it to be the underlying
+ type. */
+ if (TREE_TYPE (type) != 0)
+ break;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. We do this by
+ testing TYPE_PRECISION and TREE_UNSIGNED. The old code used to
+ look at both the names and the above fields, but that's redundant.
+ Any type whose size is between two C types will be considered
+ to be the wider of the two types. Also, we do not have a
+ special code to use for "long long", so anything wider than
+ long is treated the same. Note that we can't distinguish
+ between "int" and "long" in this code if they are the same
+ size, but that's fine, since neither can the assembler. */
+
+ if (TYPE_PRECISION (type) <= CHAR_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 12 : 2));
+
+ else if (TYPE_PRECISION (type) <= SHORT_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 13 : 3));
+
+ else if (TYPE_PRECISION (type) <= INT_TYPE_SIZE)
+ return (qualifiers | (TREE_UNSIGNED (type) ? 14 : 4));
+
+ else
+ return (qualifiers | (TREE_UNSIGNED (type) ? 15 : 5));
+
+ case REAL_TYPE:
+ /* If this is a range type, consider it to be the underlying
+ type. */
+ if (TREE_TYPE (type) != 0)
+ break;
+
+ /* Carefully distinguish all the standard types of C,
+ without messing up if the language is not C. */
+
+ if (TYPE_PRECISION (type) == FLOAT_TYPE_SIZE)
+ return (qualifiers | 6);
+
+ else
+ return (qualifiers | 7);
+
+ case COMPLEX_TYPE: /* GNU Fortran COMPLEX type. */
+ /* ??? We need to distinguish between double and float complex types,
+ but I don't know how yet because I can't reach this code from
+ existing front-ends. */
+ return (qualifiers | 7); /* Who knows? */
+
+ case CHAR_TYPE: /* GNU Pascal CHAR type. Not used in C. */
+ case BOOLEAN_TYPE: /* GNU Fortran BOOLEAN type. */
+ case FILE_TYPE: /* GNU Pascal FILE type. */
+ case SET_TYPE: /* GNU Pascal SET type. */
+ case LANG_TYPE: /* ? */
+ return qualifiers;
+
+ default:
+ abort (); /* Not a type! */
+ }
+ }
+
+ return qualifiers;
+}
+
+/* Nested function support. */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function.
+
+ This takes 16 insns: 2 shifts & 2 ands (to split up addresses), 4 sethi
+ (to load in opcodes), 4 iors (to merge address and opcodes), and 4 writes
+ (to store insns). This is a bit excessive. Perhaps a different
+ mechanism would be better here.
+
+ Emit enough FLUSH insns to synchronize the data and instruction caches. */
+
+void
+sparc_initialize_trampoline (tramp, fnaddr, cxt)
+ rtx tramp, fnaddr, cxt;
+{
+ /* SPARC 32 bit trampoline:
+
+ sethi %hi(fn),%g1
+ sethi %hi(static),%g2
+ jmp %g1+%lo(fn)
+ or %g2,%lo(static),%g2
+
+ SETHI i,r = 00rr rrr1 00ii iiii iiii iiii iiii iiii
+ JMPL r+i,d = 10dd ddd1 1100 0rrr rr1i iiii iiii iiii
+ */
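+
+  /* Worked example: for fn at 0x00040000, the first word is
+     0x03000000 | (0x00040000 >> 10) == 0x03000100, i.e.
+     "sethi %hi(0x40000),%g1", and the third word is
+     0x81c06000 | (0x00040000 & 0x3ff) == 0x81c06000, i.e.
+     "jmp %g1+%lo(0x40000)" with a zero low part.  */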
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 0)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, fnaddr,
+ size_int (10), 0, 1),
+ GEN_INT (0x03000000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ expand_binop (SImode, ior_optab,
+ expand_shift (RSHIFT_EXPR, SImode, cxt,
+ size_int (10), 0, 1),
+ GEN_INT (0x05000000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+ expand_binop (SImode, ior_optab,
+ expand_and (fnaddr, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (0x81c06000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ expand_binop (SImode, ior_optab,
+ expand_and (cxt, GEN_INT (0x3ff), NULL_RTX),
+ GEN_INT (0x8410a000),
+ NULL_RTX, 1, OPTAB_DIRECT));
+
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode, tramp))));
+ /* On UltraSPARC a flush flushes an entire cache line. The trampoline is
+ aligned on a 16 byte boundary so one flush clears it all. */
+ if (sparc_cpu != PROCESSOR_ULTRASPARC)
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (SImode,
+ plus_constant (tramp, 8)))));
+}
+
+/* The 64 bit version is simpler because it makes more sense to load the
+ values as "immediate" data out of the trampoline. It's also easier since
+ we can read the PC without clobbering a register. */
+
+void
+sparc64_initialize_trampoline (tramp, fnaddr, cxt)
+ rtx tramp, fnaddr, cxt;
+{
+ /*
+ rd %pc,%g1
+ ldx [%g1+24],%g5
+ jmp %g5
+ ldx [%g1+16],%g5
+ +16 bytes data
+ */
+
+ emit_move_insn (gen_rtx_MEM (SImode, tramp),
+ GEN_INT (0x83414000));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 4)),
+ GEN_INT (0xca586018));
+  /* 0x81c14000 is "jmp %g5", per the template above.  */
+  emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 8)),
+		  GEN_INT (0x81c14000));
+ emit_move_insn (gen_rtx_MEM (SImode, plus_constant (tramp, 12)),
+ GEN_INT (0xca586010));
+ emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 16)), cxt);
+  /* The function address must land at offset 24, where the
+     "ldx [%g1+24]" above expects it; CXT occupies bytes 16-23.  */
+  emit_move_insn (gen_rtx_MEM (DImode, plus_constant (tramp, 24)), fnaddr);
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (DImode, tramp))));
+ if (sparc_cpu != PROCESSOR_ULTRASPARC)
+ emit_insn (gen_flush (validize_mem (gen_rtx_MEM (DImode, plus_constant (tramp, 8)))));
+}
+
+/* Subroutines to support a flat (single) register window calling
+ convention. */
+
+/* Single-register window sparc stack frames look like:
+
+ Before call After call
+ +-----------------------+ +-----------------------+
+ high | | | |
+ mem | caller's temps. | | caller's temps. |
+ | | | |
+ +-----------------------+ +-----------------------+
+ | | | |
+ | arguments on stack. | | arguments on stack. |
+ | | | |
+ +-----------------------+FP+92->+-----------------------+
+ | 6 words to save | | 6 words to save |
+ | arguments passed | | arguments passed |
+ | in registers, even | | in registers, even |
+ | if not passed. | | if not passed. |
+ SP+68->+-----------------------+FP+68->+-----------------------+
+ | 1 word struct addr | | 1 word struct addr |
+ +-----------------------+FP+64->+-----------------------+
+ | | | |
+ | 16 word reg save area | | 16 word reg save area |
+ | | | |
+ SP->+-----------------------+ FP->+-----------------------+
+ | 4 word area for |
+ | fp/alu reg moves |
+ FP-16->+-----------------------+
+ | |
+ | local variables |
+ | |
+ +-----------------------+
+ | |
+ | fp register save |
+ | |
+ +-----------------------+
+ | |
+ | gp register save |
+ | |
+ +-----------------------+
+ | |
+ | alloca allocations |
+ | |
+ +-----------------------+
+ | |
+ | arguments on stack |
+ | |
+ SP+92->+-----------------------+
+ | 6 words to save |
+ | arguments passed |
+ | in registers, even |
+ low | if not passed. |
+ memory SP+68->+-----------------------+
+ | 1 word struct addr |
+ SP+64->+-----------------------+
+ | |
+       | 16 word reg save area |
+ | |
+ SP->+-----------------------+ */
+
+/* Structure to be filled in by sparc_flat_compute_frame_size with register
+ save masks, and offsets for the current function. */
+
+struct sparc_frame_info
+{
+ unsigned long total_size; /* # bytes that the entire frame takes up. */
+ unsigned long var_size; /* # bytes that variables take up. */
+ unsigned long args_size; /* # bytes that outgoing arguments take up. */
+ unsigned long extra_size; /* # bytes of extra gunk. */
+ unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
+ unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
+ unsigned long gmask; /* Mask of saved gp registers. */
+ unsigned long fmask; /* Mask of saved fp registers. */
+ unsigned long reg_offset; /* Offset from new sp to store regs. */
+ int initialized; /* Nonzero if frame size already calculated. */
+};
+
+/* Current frame information calculated by sparc_flat_compute_frame_size. */
+struct sparc_frame_info current_frame_info;
+
+/* Zero structure to initialize current_frame_info. */
+struct sparc_frame_info zero_frame_info;
+
+/* Tell prologue and epilogue if register REGNO should be saved / restored. */
+
+#define RETURN_ADDR_REGNUM 15
+#define FRAME_POINTER_MASK (1 << (FRAME_POINTER_REGNUM))
+#define RETURN_ADDR_MASK (1 << (RETURN_ADDR_REGNUM))
+
+#define MUST_SAVE_REGISTER(regno) \
+ ((regs_ever_live[regno] && !call_used_regs[regno]) \
+ || (regno == FRAME_POINTER_REGNUM && frame_pointer_needed) \
+ || (regno == RETURN_ADDR_REGNUM && regs_ever_live[RETURN_ADDR_REGNUM]))
+
+/* Return the bytes needed to compute the frame pointer from the current
+ stack pointer. */
+
+unsigned long
+sparc_flat_compute_frame_size (size)
+ int size; /* # of var. bytes allocated. */
+{
+ int regno;
+ unsigned long total_size; /* # bytes that the entire frame takes up. */
+ unsigned long var_size; /* # bytes that variables take up. */
+ unsigned long args_size; /* # bytes that outgoing arguments take up. */
+ unsigned long extra_size; /* # extra bytes. */
+ unsigned int gp_reg_size; /* # bytes needed to store gp regs. */
+ unsigned int fp_reg_size; /* # bytes needed to store fp regs. */
+ unsigned long gmask; /* Mask of saved gp registers. */
+ unsigned long fmask; /* Mask of saved fp registers. */
+ unsigned long reg_offset; /* Offset to register save area. */
+ int need_aligned_p; /* 1 if need the save area 8 byte aligned. */
+
+ /* This is the size of the 16 word reg save area, 1 word struct addr
+ area, and 4 word fp/alu register copy area. */
+ extra_size = -STARTING_FRAME_OFFSET + FIRST_PARM_OFFSET(0);
+ var_size = size;
+ /* Also include the size needed for the 6 parameter registers. */
+ args_size = current_function_outgoing_args_size + 24;
+ total_size = var_size + args_size + extra_size;
+ gp_reg_size = 0;
+ fp_reg_size = 0;
+ gmask = 0;
+ fmask = 0;
+ reg_offset = 0;
+ need_aligned_p = 0;
+
+ /* Calculate space needed for gp registers. */
+ for (regno = 1; regno <= 31; regno++)
+ {
+ if (MUST_SAVE_REGISTER (regno))
+ {
+ /* If we need to save two regs in a row, ensure there's room to bump
+ up the address to align it to a doubleword boundary. */
+ if ((regno & 0x1) == 0 && MUST_SAVE_REGISTER (regno+1))
+ {
+ if (gp_reg_size % 8 != 0)
+ gp_reg_size += 4;
+ gp_reg_size += 2 * UNITS_PER_WORD;
+ gmask |= 3 << regno;
+ regno++;
+ need_aligned_p = 1;
+ }
+ else
+ {
+ gp_reg_size += UNITS_PER_WORD;
+ gmask |= 1 << regno;
+ }
+ }
+ }
+
+ /* Calculate space needed for fp registers. */
+ for (regno = 32; regno <= 63; regno++)
+ {
+ if (regs_ever_live[regno] && !call_used_regs[regno])
+ {
+ fp_reg_size += UNITS_PER_WORD;
+ fmask |= 1 << (regno - 32);
+ }
+ }
+
+ if (gmask || fmask)
+ {
+ int n;
+ reg_offset = FIRST_PARM_OFFSET(0) + args_size;
+ /* Ensure save area is 8 byte aligned if we need it. */
+ n = reg_offset % 8;
+ if (need_aligned_p && n != 0)
+ {
+ total_size += 8 - n;
+ reg_offset += 8 - n;
+ }
+ total_size += gp_reg_size + fp_reg_size;
+ }
+
+ /* ??? This looks a little suspicious. Clarify. */
+ if (total_size == extra_size)
+ total_size = extra_size = 0;
+
+ total_size = SPARC_STACK_ALIGN (total_size);
+
+ /* Save other computed information. */
+ current_frame_info.total_size = total_size;
+ current_frame_info.var_size = var_size;
+ current_frame_info.args_size = args_size;
+ current_frame_info.extra_size = extra_size;
+ current_frame_info.gp_reg_size = gp_reg_size;
+ current_frame_info.fp_reg_size = fp_reg_size;
+ current_frame_info.gmask = gmask;
+ current_frame_info.fmask = fmask;
+ current_frame_info.reg_offset = reg_offset;
+ current_frame_info.initialized = reload_completed;
+
+ /* Ok, we're done. */
+ return total_size;
+}
+
+/* Save/restore registers in GMASK and FMASK at register BASE_REG plus offset
+ OFFSET.
+
+ BASE_REG must be 8 byte aligned. This allows us to test OFFSET for
+ appropriate alignment and use DOUBLEWORD_OP when we can. We assume
+ [BASE_REG+OFFSET] will always be a valid address.
+
+ WORD_OP is either "st" for save, "ld" for restore.
+ DOUBLEWORD_OP is either "std" for save, "ldd" for restore. */
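+
+/* E.g. with %l0 and %l1 (regnos 16 and 17) in GMASK, a save pass
+   (WORD_OP "st") emits a single "std %l0,[%sp+OFFSET]" once OFFSET has
+   been bumped to a multiple of 8.  */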
+
+void
+sparc_flat_save_restore (file, base_reg, offset, gmask, fmask, word_op,
+ doubleword_op, base_offset)
+ FILE *file;
+ char *base_reg;
+ unsigned int offset;
+ unsigned long gmask;
+ unsigned long fmask;
+ char *word_op;
+ char *doubleword_op;
+ unsigned long base_offset;
+{
+ int regno;
+
+ if (gmask == 0 && fmask == 0)
+ return;
+
+ /* Save registers starting from high to low. We've already saved the
+ previous frame pointer and previous return address for the debugger's
+ sake. The debugger allows us to not need a nop in the epilog if at least
+ one register is reloaded in addition to return address. */
+
+ if (gmask)
+ {
+ for (regno = 1; regno <= 31; regno++)
+ {
+ if ((gmask & (1L << regno)) != 0)
+ {
+ if ((regno & 0x1) == 0 && ((gmask & (1L << (regno+1))) != 0))
+ {
+ /* We can save two registers in a row. If we're not at a
+ double word boundary, move to one.
+ sparc_flat_compute_frame_size ensures there's room to do
+ this. */
+ if (offset % 8 != 0)
+ offset += UNITS_PER_WORD;
+
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ doubleword_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ dwarf2out_reg_save (l, regno, offset + base_offset);
+ dwarf2out_reg_save
+ (l, regno+1, offset+base_offset + UNITS_PER_WORD);
+ }
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ doubleword_op, base_reg, offset,
+ reg_names[regno]);
+
+ offset += 2 * UNITS_PER_WORD;
+ regno++;
+ }
+ else
+ {
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ word_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", regno, offset + base_offset);
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ word_op, base_reg, offset, reg_names[regno]);
+
+ offset += UNITS_PER_WORD;
+ }
+ }
+ }
+ }
+
+ if (fmask)
+ {
+ for (regno = 32; regno <= 63; regno++)
+ {
+ if ((fmask & (1L << (regno - 32))) != 0)
+ {
+ if (word_op[0] == 's')
+ {
+ fprintf (file, "\t%s %s,[%s+%d]\n",
+ word_op, reg_names[regno],
+ base_reg, offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_reg_save ("", regno, offset + base_offset);
+ }
+ else
+ fprintf (file, "\t%s [%s+%d],%s\n",
+ word_op, base_reg, offset, reg_names[regno]);
+
+ offset += UNITS_PER_WORD;
+ }
+ }
+ }
+}
+
+/* Set up the stack and frame (if desired) for the function. */
+
+void
+sparc_flat_output_function_prologue (file, size)
+ FILE *file;
+ int size;
+{
+ char *sp_str = reg_names[STACK_POINTER_REGNUM];
+ unsigned long gmask = current_frame_info.gmask;
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#PROLOGUE# 0\n", ASM_COMMENT_START);
+ fprintf (file, "\t%s# vars= %ld, regs= %d/%d, args= %d, extra= %ld\n",
+ ASM_COMMENT_START,
+ current_frame_info.var_size,
+ current_frame_info.gp_reg_size / 4,
+ current_frame_info.fp_reg_size / 4,
+ current_function_outgoing_args_size,
+ current_frame_info.extra_size);
+
+ size = SPARC_STACK_ALIGN (size);
+ size = (! current_frame_info.initialized
+ ? sparc_flat_compute_frame_size (size)
+ : current_frame_info.total_size);
+
+ /* These cases shouldn't happen. Catch them now. */
+ if (size == 0 && (gmask || current_frame_info.fmask))
+ abort ();
+
+ /* Allocate our stack frame by decrementing %sp.
+ At present, the only algorithm gdb can use to determine if this is a
+ flat frame is if we always set %i7 if we set %sp. This can be optimized
+ in the future by putting in some sort of debugging information that says
+ this is a `flat' function. However, there is still the case of debugging
+ code without such debugging information (including cases where most fns
+ have such info, but there is one that doesn't). So, always do this now
+ so we don't get a lot of code out there that gdb can't handle.
+     If the frame pointer isn't needed then that's ok - gdb won't be able to
+ distinguish us from a non-flat function but there won't (and shouldn't)
+ be any differences anyway. The return pc is saved (if necessary) right
+ after %i7 so gdb won't have to look too far to find it. */
+ if (size > 0)
+ {
+ unsigned int reg_offset = current_frame_info.reg_offset;
+ char *fp_str = reg_names[FRAME_POINTER_REGNUM];
+ char *t1_str = "%g1";
+
+ /* Things get a little tricky if local variables take up more than ~4096
+ bytes and outgoing arguments take up more than ~4096 bytes. When that
+ happens, the register save area can't be accessed from either end of
+ the frame. Handle this by decrementing %sp to the start of the gp
+ register save area, save the regs, update %i7, and then set %sp to its
+ final value. Given that we only have one scratch register to play
+ with it is the cheapest solution, and it helps gdb out as it won't
+ slow down recognition of flat functions.
+ Don't change the order of insns emitted here without checking with
+ the gdb folk first. */
+
+ /* Is the entire register save area offsettable from %sp? */
+ if (reg_offset < 4096 - 64 * UNITS_PER_WORD)
+ {
+ if (size <= 4096)
+ {
+ fprintf (file, "\tadd %s,%d,%s\n",
+ sp_str, -size, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ fp_str, sp_str, reg_offset);
+ fprintf (file, "\tsub %s,%d,%s\t%s# set up frame pointer\n",
+ sp_str, -size, fp_str, ASM_COMMENT_START);
+ reg_offset += 4;
+ }
+ }
+ else
+ {
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size, t1_str, sp_str, t1_str, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ fp_str, sp_str, reg_offset);
+ fprintf (file, "\tadd %s,%s,%s\t%s# set up frame pointer\n",
+ sp_str, t1_str, fp_str, ASM_COMMENT_START);
+ reg_offset += 4;
+ }
+ }
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ reg_offset - 4 - size);
+ dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ }
+ else
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size);
+ }
+ if (gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[RETURN_ADDR_REGNUM], sp_str, reg_offset);
+ if (dwarf2out_do_frame ())
+ dwarf2out_return_save ("", reg_offset - size);
+ reg_offset += 4;
+ }
+ sparc_flat_save_restore (file, sp_str, reg_offset,
+ gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "st", "std", -size);
+ }
+ else
+ {
+ /* Subtract %sp in two steps, but make sure there is always a
+ 64 byte register save area, and %sp is properly aligned. */
+ /* Amount to decrement %sp by, the first time. */
+ unsigned int size1 = ((size - reg_offset + 64) + 15) & -16;
+ /* Offset to register save area from %sp. */
+ unsigned int offset = size1 - (size - reg_offset);
+
+ if (size1 <= 4096)
+ {
+ fprintf (file, "\tadd %s,%d,%s\n",
+ sp_str, -size1, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n\tsub %s,%d,%s\t%s# set up frame pointer\n",
+ fp_str, sp_str, offset, sp_str, -size1, fp_str,
+ ASM_COMMENT_START);
+ offset += 4;
+ }
+ }
+ else
+ {
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size1, t1_str, sp_str, t1_str, sp_str);
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n\tadd %s,%s,%s\t%s# set up frame pointer\n",
+ fp_str, sp_str, offset, sp_str, t1_str, fp_str,
+ ASM_COMMENT_START);
+ offset += 4;
+ }
+ }
+ if (dwarf2out_do_frame ())
+ {
+ char *l = dwarf2out_cfi_label ();
+ if (gmask & FRAME_POINTER_MASK)
+ {
+ dwarf2out_reg_save (l, FRAME_POINTER_REGNUM,
+ offset - 4 - size1);
+ dwarf2out_def_cfa (l, FRAME_POINTER_REGNUM, 0);
+ }
+ else
+ dwarf2out_def_cfa (l, STACK_POINTER_REGNUM, size1);
+ }
+ if (gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tst %s,[%s+%d]\n",
+ reg_names[RETURN_ADDR_REGNUM], sp_str, offset);
+ if (dwarf2out_do_frame ())
+ /* offset - size1 == reg_offset - size
+ if reg_offset were updated above like offset. */
+ dwarf2out_return_save ("", offset - size1);
+ offset += 4;
+ }
+ sparc_flat_save_restore (file, sp_str, offset,
+ gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "st", "std", -size1);
+ fprintf (file, "\tset %d,%s\n\tsub %s,%s,%s\n",
+ size - size1, t1_str, sp_str, t1_str, sp_str);
+ if (dwarf2out_do_frame ())
+ if (! (gmask & FRAME_POINTER_MASK))
+ dwarf2out_def_cfa ("", STACK_POINTER_REGNUM, size);
+ }
+ }
+
+ fprintf (file, "\t%s#PROLOGUE# 1\n", ASM_COMMENT_START);
+}
+
+/* Do any necessary cleanup after a function to restore stack, frame,
+ and regs. */
+
+void
+sparc_flat_output_function_epilogue (file, size)
+ FILE *file;
+ int size;
+{
+ rtx epilogue_delay = current_function_epilogue_delay_list;
+ int noepilogue = FALSE;
+
+ /* This is only for the human reader. */
+ fprintf (file, "\t%s#EPILOGUE#\n", ASM_COMMENT_START);
+
+  /* The epilogue does not depend on any registers other than the stack
+     registers, so we assume that a single pending nop can be ignored
+     while two must be filled (2 nops occur for integer multiply and
+     divide).  */
+
+ size = SPARC_STACK_ALIGN (size);
+ size = (!current_frame_info.initialized
+ ? sparc_flat_compute_frame_size (size)
+ : current_frame_info.total_size);
+
+ if (size == 0 && epilogue_delay == 0)
+ {
+ rtx insn = get_last_insn ();
+
+ /* If the last insn was a BARRIER, we don't have to write any code
+ because a jump (aka return) was put there. */
+ if (GET_CODE (insn) == NOTE)
+ insn = prev_nonnote_insn (insn);
+ if (insn && GET_CODE (insn) == BARRIER)
+ noepilogue = TRUE;
+ }
+
+ if (!noepilogue)
+ {
+ unsigned int reg_offset = current_frame_info.reg_offset;
+ unsigned int size1;
+ char *sp_str = reg_names[STACK_POINTER_REGNUM];
+ char *fp_str = reg_names[FRAME_POINTER_REGNUM];
+ char *t1_str = "%g1";
+
+ /* In the reload sequence, we don't need to fill the load delay
+ slots for most of the loads, also see if we can fill the final
+ delay slot if not otherwise filled by the reload sequence. */
+
+ if (size > 4095)
+ fprintf (file, "\tset %d,%s\n", size, t1_str);
+
+ if (frame_pointer_needed)
+ {
+ if (size > 4095)
+ fprintf (file,"\tsub %s,%s,%s\t\t%s# sp not trusted here\n",
+ fp_str, t1_str, sp_str, ASM_COMMENT_START);
+ else
+ fprintf (file,"\tsub %s,%d,%s\t\t%s# sp not trusted here\n",
+ fp_str, size, sp_str, ASM_COMMENT_START);
+ }
+
+ /* Is the entire register save area offsettable from %sp? */
+ if (reg_offset < 4096 - 64 * UNITS_PER_WORD)
+ {
+ size1 = 0;
+ }
+ else
+ {
+ /* Restore %sp in two steps, but make sure there is always a
+ 64 byte register save area, and %sp is properly aligned. */
+ /* Amount to increment %sp by, the first time. */
+ size1 = ((reg_offset - 64 - 16) + 15) & -16;
+ /* Offset to register save area from %sp. */
+ reg_offset = size1 - reg_offset;
+
+ fprintf (file, "\tset %d,%s\n\tadd %s,%s,%s\n",
+ size1, t1_str, sp_str, t1_str, sp_str);
+ }
+
+ /* We must restore the frame pointer and return address reg first
+ because they are treated specially by the prologue output code. */
+ if (current_frame_info.gmask & FRAME_POINTER_MASK)
+ {
+ fprintf (file, "\tld [%s+%d],%s\n",
+ sp_str, reg_offset, fp_str);
+ reg_offset += 4;
+ }
+ if (current_frame_info.gmask & RETURN_ADDR_MASK)
+ {
+ fprintf (file, "\tld [%s+%d],%s\n",
+ sp_str, reg_offset, reg_names[RETURN_ADDR_REGNUM]);
+ reg_offset += 4;
+ }
+
+ /* Restore any remaining saved registers. */
+ sparc_flat_save_restore (file, sp_str, reg_offset,
+ current_frame_info.gmask & ~(FRAME_POINTER_MASK | RETURN_ADDR_MASK),
+ current_frame_info.fmask,
+ "ld", "ldd", 0);
+
+ /* If we had to increment %sp in two steps, record it so the second
+ restoration in the epilogue finishes up. */
+ if (size1 > 0)
+ {
+ size -= size1;
+ if (size > 4095)
+ fprintf (file, "\tset %d,%s\n",
+ size, t1_str);
+ }
+
+ if (current_function_returns_struct)
+ fprintf (file, "\tjmp %%o7+12\n");
+ else
+ fprintf (file, "\tretl\n");
+
+ /* If the only register saved is the return address, we need a
+ nop, unless we have an instruction to put into it. Otherwise
+ we don't since reloading multiple registers doesn't reference
+ the register being loaded. */
+
+ if (epilogue_delay)
+ {
+ if (size)
+ abort ();
+ final_scan_insn (XEXP (epilogue_delay, 0), file, 1, -2, 1);
+ }
+
+ else if (size > 4095)
+ fprintf (file, "\tadd %s,%s,%s\n", sp_str, t1_str, sp_str);
+
+ else if (size > 0)
+ fprintf (file, "\tadd %s,%d,%s\n", sp_str, size, sp_str);
+
+ else
+ fprintf (file, "\tnop\n");
+ }
+
+ /* Reset state info for each function. */
+ current_frame_info = zero_frame_info;
+}
+
+/* Define the number of delay slots needed for the function epilogue.
+
+ On the sparc, we need a slot if either no stack has been allocated,
+ or the only register saved is the return register. */
+
+int
+sparc_flat_epilogue_delay_slots ()
+{
+ if (!current_frame_info.initialized)
+ (void) sparc_flat_compute_frame_size (get_frame_size ());
+
+ if (current_frame_info.total_size == 0)
+ return 1;
+
+ return 0;
+}
+
+/* Return true if TRIAL is a valid insn for the epilogue delay slot.
+ Any single length instruction which doesn't reference the stack or frame
+ pointer is OK. */
+
+int
+sparc_flat_eligible_for_epilogue_delay (trial, slot)
+ rtx trial;
+ int slot ATTRIBUTE_UNUSED;
+{
+ rtx pat = PATTERN (trial);
+
+ if (get_attr_length (trial) != 1)
+ return 0;
+
+ /* If %g0 is live, there are lots of things we can't handle.
+ Rather than trying to find them all now, let's punt and only
+ optimize things as necessary. */
+ if (TARGET_LIVE_G0)
+ return 0;
+
+ if (! reg_mentioned_p (stack_pointer_rtx, pat)
+ && ! reg_mentioned_p (frame_pointer_rtx, pat))
+ return 1;
+
+ return 0;
+}
+
+/* Adjust the cost of a scheduling dependency.  Return the new cost of
+   a dependency LINK of INSN on DEP_INSN.  COST is the current cost.  */
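+
+/* E.g. a load whose address depends on the result of DEP_INSN has its
+   cost raised from 1 to 4 by the TYPE_LOAD case below.  */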
+
+int
+supersparc_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ enum attr_type insn_type;
+
+ if (! recog_memoized (insn))
+ return 0;
+
+ insn_type = get_attr_type (insn);
+
+ if (REG_NOTE_KIND (link) == 0)
+ {
+ /* Data dependency; DEP_INSN writes a register that INSN reads some
+ cycles later. */
+
+      /* If a load, then the dependence must be on the memory address;
+	 add an extra 'cycle'.  Note that the cost could be two cycles
+	 if the reg was written late in an instruction group; we can't tell
+	 here.  */
+ if (insn_type == TYPE_LOAD || insn_type == TYPE_FPLOAD)
+ return cost + 3;
+
+ /* Get the delay only if the address of the store is the dependence. */
+ if (insn_type == TYPE_STORE || insn_type == TYPE_FPSTORE)
+ {
+ rtx pat = PATTERN(insn);
+ rtx dep_pat = PATTERN (dep_insn);
+
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+ return cost; /* This shouldn't happen! */
+
+ /* The dependency between the two instructions was on the data that
+ is being stored. Assume that this implies that the address of the
+ store is not dependent. */
+ if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+ return cost;
+
+ return cost + 3; /* An approximation. */
+ }
+
+ /* A shift instruction cannot receive its data from an instruction
+ in the same cycle; add a one cycle penalty. */
+ if (insn_type == TYPE_SHIFT)
+ return cost + 3; /* Split before cascade into shift. */
+ }
+ else
+ {
+ /* Anti- or output- dependency; DEP_INSN reads/writes a register that
+ INSN writes some cycles later. */
+
+ /* These are only significant for the fpu unit; writing a fp reg before
+ the fpu has finished with it stalls the processor. */
+
+ /* Reusing an integer register causes no problems. */
+ if (insn_type == TYPE_IALU || insn_type == TYPE_SHIFT)
+ return 0;
+ }
+
+ return cost;
+}
+
+int
+ultrasparc_adjust_cost (insn, link, dep_insn, cost)
+ rtx insn;
+ rtx link;
+ rtx dep_insn;
+ int cost;
+{
+ enum attr_type insn_type, dep_type;
+ rtx pat = PATTERN(insn);
+ rtx dep_pat = PATTERN (dep_insn);
+
+ if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
+ return cost;
+
+ insn_type = get_attr_type (insn);
+ dep_type = get_attr_type (dep_insn);
+
+#define SLOW_FP(dep_type) \
+(dep_type == TYPE_FPSQRT || dep_type == TYPE_FPDIVS || dep_type == TYPE_FPDIVD)
+
+ switch (REG_NOTE_KIND (link))
+ {
+ case 0:
+ /* Data dependency; DEP_INSN writes a register that INSN reads some
+ cycles later. */
+
+ switch (insn_type)
+ {
+ /* UltraSPARC can dual issue a store and an instruction setting
+ the value stored, except for divide and square root. */
+ case TYPE_FPSTORE:
+ if (! SLOW_FP (dep_type))
+ return 0;
+ return cost;
+
+ case TYPE_STORE:
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
+ return cost;
+
+ if (rtx_equal_p (SET_DEST (dep_pat), SET_SRC (pat)))
+ /* The dependency between the two instructions is on the data
+ that is being stored. Assume that the address of the store
+ is not also dependent. */
+ return 0;
+ return cost;
+
+ case TYPE_LOAD:
+ case TYPE_SLOAD:
+ case TYPE_FPLOAD:
+ /* A load does not return data until at least 11 cycles after
+ a store to the same location. 3 cycles are accounted for
+ in the load latency; add the other 8 here. */
+ if (dep_type == TYPE_STORE || dep_type == TYPE_FPSTORE)
+ {
+ /* If the addresses are not equal this may be a false
+ dependency because pointer aliasing could not be
+ determined. Add only 2 cycles in that case. 2 is
+ an arbitrary compromise between 8, which would cause
+ the scheduler to generate worse code elsewhere to
+ compensate for a dependency which might not really
+ exist, and 0. */
+ if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET
+ || GET_CODE (SET_SRC (pat)) != MEM
+ || GET_CODE (SET_DEST (dep_pat)) != MEM
+ || ! rtx_equal_p (XEXP (SET_SRC (pat), 0),
+ XEXP (SET_DEST (dep_pat), 0)))
+ return cost + 2;
+
+ return cost + 8;
+ }
+ return cost;
+
+ case TYPE_BRANCH:
+ /* Compare to branch latency is 0. There is no benefit from
+ separating compare and branch. */
+ if (dep_type == TYPE_COMPARE)
+ return 0;
+ /* Floating point compare to branch latency is less than
+ compare to conditional move. */
+ if (dep_type == TYPE_FPCMP)
+ return cost - 1;
+ return cost;
+
+ case TYPE_FPCMOVE:
+ /* FMOVR class instructions can not issue in the same cycle
+ or the cycle after an instruction which writes any
+ integer register. Model this as cost 2 for dependent
+ instructions. */
+ if ((dep_type == TYPE_IALU || dep_type == TYPE_UNARY
+ || dep_type == TYPE_BINARY)
+ && cost < 2)
+ return 2;
+ /* Otherwise check as for integer conditional moves. */
+
+ case TYPE_CMOVE:
+ /* Conditional moves involving integer registers wait until
+ 3 cycles after loads return data. The interlock applies
+ to all loads, not just dependent loads, but that is hard
+ to model. */
+ if (dep_type == TYPE_LOAD || dep_type == TYPE_SLOAD)
+ return cost + 3;
+ return cost;
+
+ default:
+ break;
+ }
+ break;
+
+ case REG_DEP_ANTI:
+ /* Divide and square root lock destination registers for full latency. */
+ if (! SLOW_FP (dep_type))
+ return 0;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Other costs not accounted for:
+ - Multiply should be modeled as having no latency because there is
+ nothing the scheduler can do about it.
+ - Single precision floating point loads lock the other half of
+ the even/odd register pair.
+ - Several hazards associated with ldd/std are ignored because these
+ instructions are rarely generated for V9.
+ - A shift following an integer instruction which does not set the
+ condition codes can not issue in the same cycle.
+ - The floating point pipeline can not have both a single and double
+ precision operation active at the same time. Format conversions
+ and graphics instructions are given honorary double precision status.
+ - call and jmpl are always the first instruction in a group. */
+
+ return cost;
+}
+
+int
+sparc_issue_rate ()
+{
+ switch (sparc_cpu)
+ {
+ default:
+ return 1;
+ case PROCESSOR_V9:
+ /* Assume V9 processors are capable of at least dual-issue. */
+ return 2;
+ case PROCESSOR_SUPERSPARC:
+ return 3;
+ case PROCESSOR_ULTRASPARC:
+ return 4;
+ }
+}
+
+static int
+set_extends(x, insn)
+ rtx x, insn;
+{
+ register rtx pat = PATTERN (insn);
+
+ switch (GET_CODE (SET_SRC (pat)))
+ {
+ /* Load and some shift instructions zero extend. */
+ case MEM:
+ case ZERO_EXTEND:
+ /* sethi clears the high bits */
+ case HIGH:
+ /* LO_SUM is used with sethi. sethi cleared the high
+ bits and the values used with lo_sum are positive */
+ case LO_SUM:
+ /* UNSPEC is v8plus_clear_high */
+ case UNSPEC:
+ /* Store flag stores 0 or 1 */
+ case LT: case LTU:
+ case GT: case GTU:
+ case LE: case LEU:
+ case GE: case GEU:
+ case EQ:
+ case NE:
+ return 1;
+ case AND:
+ {
+ rtx op1 = XEXP (SET_SRC (pat), 1);
+ if (GET_CODE (op1) == CONST_INT)
+ return INTVAL (op1) >= 0;
+ if (GET_CODE (XEXP (SET_SRC (pat), 0)) == REG
+ && sparc_check_64 (XEXP (SET_SRC (pat), 0), insn) == 1)
+ return 1;
+ if (GET_CODE (op1) == REG
+ && sparc_check_64 ((op1), insn) == 1)
+ return 1;
+ }
+ case ASHIFT:
+ case LSHIFTRT:
+ return GET_MODE (SET_SRC (pat)) == SImode;
+ /* Positive integers leave the high bits zero. */
+ case CONST_DOUBLE:
+ return ! (CONST_DOUBLE_LOW (x) & 0x80000000);
+ case CONST_INT:
+ return ! (INTVAL (x) & 0x80000000);
+ case ASHIFTRT:
+ case SIGN_EXTEND:
+ return - (GET_MODE (SET_SRC (pat)) == SImode);
+ default:
+ return 0;
+ }
+}
+
+/* Return 0 if the high 32 bits of X (the low word of X, if DImode) are
+ unknown. Return 1 if the high bits are zero, -1 if the register is
+ sign extended. */
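+
+/* For instance, after "srl %o0,0,%o0" (an SImode LSHIFTRT) this returns
+   1 for %o0, while after "sra %o0,0,%o0" it returns -1.  */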
+int
+sparc_check_64 (x, insn)
+ rtx x, insn;
+{
+ /* If a register is set only once it is safe to ignore insns this
+ code does not know how to handle. The loop will either recognize
+ the single set and return the correct value or fail to recognize
+ it and return 0. */
+ int set_once = 0;
+
+ if (GET_CODE (x) == REG
+ && flag_expensive_optimizations
+ && REG_N_SETS (REGNO (x)) == 1)
+ set_once = 1;
+
+ if (insn == 0)
+ {
+ if (set_once)
+ insn = get_last_insn_anywhere ();
+ else
+ return 0;
+ }
+
+ while ((insn = PREV_INSN (insn)))
+ {
+ switch (GET_CODE (insn))
+ {
+ case JUMP_INSN:
+ case NOTE:
+ break;
+ case CODE_LABEL:
+ case CALL_INSN:
+ default:
+ if (! set_once)
+ return 0;
+ break;
+ case INSN:
+ {
+ rtx pat = PATTERN (insn);
+ if (GET_CODE (pat) != SET)
+ return 0;
+ if (rtx_equal_p (x, SET_DEST (pat)))
+ return set_extends (x, insn);
+ if (reg_overlap_mentioned_p (SET_DEST (pat), x))
+ return 0;
+ }
+ }
+ }
+ return 0;
+}
+
+char *
+sparc_v8plus_shift (operands, insn, opcode)
+ rtx *operands;
+ rtx insn;
+ char *opcode;
+{
+ static char asm_code[60];
+
+ if (GET_CODE (operands[3]) == SCRATCH)
+ operands[3] = operands[0];
+ if (GET_CODE (operands[1]) == CONST_INT)
+ {
+ output_asm_insn ("mov %1,%3", operands);
+ }
+ else
+ {
+ output_asm_insn ("sllx %H1,32,%3", operands);
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn ("srl %L1,0,%L1", operands);
+ output_asm_insn ("or %L1,%3,%3", operands);
+ }
+
+  strcpy (asm_code, opcode);
+ if (which_alternative != 2)
+ return strcat (asm_code, " %0,%2,%L0\n\tsrlx %L0,32,%H0");
+ else
+ return strcat (asm_code, " %3,%2,%3\n\tsrlx %3,32,%H0\n\tmov %3,%L0");
+}
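+
+/* As an illustration (not part of the original code): with OPCODE "sllx"
+   and a register source, the code above first assembles the full 64 bit
+   value in %3:
+
+	sllx	%H1,32,%3
+	srl	%L1,0,%L1	! only if the low word may have high bits set
+	or	%L1,%3,%3
+
+   and then returns "sllx %0,%2,%L0\n\tsrlx %L0,32,%H0" (or the %3 based
+   form for the third alternative) as the remaining insn template.  */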
+
+
+/* Return 1 if DEST and SRC reference only global and `in' registers.  */
+
+int
+sparc_return_peephole_ok (dest, src)
+ rtx dest, src;
+{
+ if (! TARGET_V9)
+ return 0;
+ if (leaf_function)
+ return 0;
+ if (GET_CODE (src) != CONST_INT
+ && (GET_CODE (src) != REG || ! IN_OR_GLOBAL_P (src)))
+ return 0;
+ return IN_OR_GLOBAL_P (dest);
+}
diff --git a/contrib/gcc/config/sparc/sparc.h b/contrib/gcc/config/sparc/sparc.h
new file mode 100644
index 0000000..e66f5e6
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.h
@@ -0,0 +1,3287 @@
+/* Definitions of target machine for GNU compiler, for Sun SPARC.
+ Copyright (C) 1987, 88, 89, 92, 94-97, 1998 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+ 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+ at Cygnus Support.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Note that some other tm.h files include this one and then override
+ whatever definitions are necessary. */
+
+/* Specify this in a cover file to provide bi-architecture (32/64) support. */
+/* #define SPARC_BI_ARCH */
+
+/* Macro used later in this file to determine default architecture. */
+#define DEFAULT_ARCH32_P ((TARGET_DEFAULT & MASK_64BIT) == 0)
+
+/* TARGET_ARCH{32,64} are the main macros to decide which of the two
+ architectures to compile for. We allow targets to choose compile time or
+ runtime selection. */
+#ifdef SPARC_BI_ARCH
+#define TARGET_ARCH32 (! TARGET_64BIT)
+#else
+#define TARGET_ARCH32 (DEFAULT_ARCH32_P)
+#endif
+#define TARGET_ARCH64 (! TARGET_ARCH32)
+
+/* Code model selection.
+ -mcmodel is used to select the v9 code model.
+ Different code models aren't supported for v8 code.
+
+ TARGET_CM_32: 32 bit address space, top 32 bits = 0,
+ pointers are 32 bits. Note that this isn't intended
+ to imply a v8 abi.
+
+ TARGET_CM_MEDLOW: 32 bit address space, top 32 bits = 0,
+ avoid generating %uhi and %ulo terms,
+ pointers are 64 bits.
+
+ TARGET_CM_MEDMID: 64 bit address space.
+ The executable must be in the low 16 TB of memory.
+ This corresponds to the low 44 bits, and the %[hml]44
+ relocs are used.
+
+ TARGET_CM_MEDANY: 64 bit address space.
+ The text and data segments have a maximum size of 31
+ bits and may be located anywhere. The maximum offset
+ from any instruction to the label _GLOBAL_OFFSET_TABLE_
+ is 31 bits.
+
+ TARGET_CM_EMBMEDANY: 64 bit address space.
+ The text and data segments have a maximum size of 31 bits
+ and may be located anywhere. Register %g4 contains
+ the start address of the data segment.
+*/
+
+enum cmodel {
+ CM_32,
+ CM_MEDLOW,
+ CM_MEDMID,
+ CM_MEDANY,
+ CM_EMBMEDANY
+};
+
+/* Value of -mcmodel specified by user. */
+extern char *sparc_cmodel_string;
+/* One of CM_FOO. */
+extern enum cmodel sparc_cmodel;
+
+/* V9 code model selection. */
+#define TARGET_CM_MEDLOW (sparc_cmodel == CM_MEDLOW)
+#define TARGET_CM_MEDMID (sparc_cmodel == CM_MEDMID)
+#define TARGET_CM_MEDANY (sparc_cmodel == CM_MEDANY)
+#define TARGET_CM_EMBMEDANY (sparc_cmodel == CM_EMBMEDANY)
+
+#define SPARC_DEFAULT_CMODEL CM_MEDLOW
+
+/* This is call-clobbered in the normal ABI, but is reserved in the
+ home grown (aka upward compatible) embedded ABI. */
+#define EMBMEDANY_BASE_REG "%g4"
+
+/* Values of TARGET_CPU_DEFAULT, set via -D in the Makefile,
+ and specified by the user via --with-cpu=foo.
+ This specifies the cpu implementation, not the architecture size. */
+#define TARGET_CPU_sparc 0
+#define TARGET_CPU_v7 0 /* alias for previous */
+#define TARGET_CPU_sparclet 1
+#define TARGET_CPU_sparclite 2
+#define TARGET_CPU_v8 3 /* generic v8 implementation */
+#define TARGET_CPU_supersparc 4
+#define TARGET_CPU_v9 5 /* generic v9 implementation */
+#define TARGET_CPU_sparc64 5 /* alias */
+#define TARGET_CPU_ultrasparc 6
+
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparc || TARGET_CPU_DEFAULT == TARGET_CPU_v8 || TARGET_CPU_DEFAULT == TARGET_CPU_supersparc
+#define CPP_CPU_DEFAULT_SPEC ""
+#define ASM_CPU_DEFAULT_SPEC ""
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclet
+#define CPP_CPU_DEFAULT_SPEC "-D__sparclet__"
+#define ASM_CPU_DEFAULT_SPEC "-Asparclet"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_sparclite
+#define CPP_CPU_DEFAULT_SPEC "-D__sparclite__"
+#define ASM_CPU_DEFAULT_SPEC "-Asparclite"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_v9
+/* ??? What does Sun's CC pass? */
+#define CPP_CPU_DEFAULT_SPEC "-D__sparc_v9__"
+/* ??? It's not clear how other assemblers will handle this, so by default
+ use GAS. Sun's Solaris assembler recognizes -xarch=v8plus, but this case
+ is handled in sol2.h. */
+#define ASM_CPU_DEFAULT_SPEC "-Av9"
+#endif
+#if TARGET_CPU_DEFAULT == TARGET_CPU_ultrasparc
+#define CPP_CPU_DEFAULT_SPEC "-D__sparc_v9__"
+#define ASM_CPU_DEFAULT_SPEC "-Av9a"
+#endif
+#ifndef CPP_CPU_DEFAULT_SPEC
+Unrecognized value in TARGET_CPU_DEFAULT.
+#endif
+
+/* Names to predefine in the preprocessor for this target machine.
+   ??? It would be nice to not include any subtarget specific values here;
+   however, there's no way to portably provide subtarget values to
+   CPP_PREDEFINES.  Also, -D values in CPP_SUBTARGET_SPEC don't get turned
+   into foo, __foo and __foo__.  */
+
+#define CPP_PREDEFINES "-Dsparc -Dsun -Dunix -Asystem(unix) -Asystem(bsd)"
+
+/* Define macros to distinguish architectures. */
+
+/* Common CPP definitions used by CPP_SPEC amongst the various targets
+ for handling -mcpu=xxx switches. */
+#define CPP_CPU_SPEC "\
+%{mcypress:} \
+%{msparclite:-D__sparclite__} \
+%{mf930:-D__sparclite__} %{mf934:-D__sparclite__} \
+%{mv8:-D__sparc_v8__} \
+%{msupersparc:-D__supersparc__ -D__sparc_v8__} \
+%{mcpu=sparclet:-D__sparclet__} %{mcpu=tsc701:-D__sparclet__} \
+%{mcpu=sparclite:-D__sparclite__} \
+%{mcpu=f930:-D__sparclite__} %{mcpu=f934:-D__sparclite__} \
+%{mcpu=v8:-D__sparc_v8__} \
+%{mcpu=supersparc:-D__supersparc__ -D__sparc_v8__} \
+%{mcpu=v9:-D__sparc_v9__} \
+%{mcpu=ultrasparc:-D__sparc_v9__} \
+%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(cpp_cpu_default)}}}}}}} \
+"
+
+/* ??? The GCC_NEW_VARARGS macro is now obsolete, because gcc always uses
+   the right varargs.h file when bootstrapping.  */
+/* ??? It's not clear what value we want to use for -Acpu/machine for
+ sparc64 in 32 bit environments, so for now we only use `sparc64' in
+ 64 bit environments. */
+
+#define CPP_ARCH32_SPEC "-D__GCC_NEW_VARARGS__ -Acpu(sparc) -Amachine(sparc)"
+#define CPP_ARCH64_SPEC "-D__arch64__ -Acpu(sparc64) -Amachine(sparc64)"
+#define CPP_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? CPP_ARCH32_SPEC : CPP_ARCH64_SPEC)
+
+#define CPP_ARCH_SPEC "\
+%{m32:%(cpp_arch32)} \
+%{m64:%(cpp_arch64)} \
+%{!m32:%{!m64:%(cpp_arch_default)}} \
+"
+
+/* Macros to distinguish endianness. */
+#define CPP_ENDIAN_SPEC "%{mlittle-endian:-D__LITTLE_ENDIAN__}"
+
+/* Macros to distinguish the particular subtarget. */
+#define CPP_SUBTARGET_SPEC ""
+
+#define CPP_SPEC "%(cpp_cpu) %(cpp_arch) %(cpp_endian) %(cpp_subtarget)"
+
+/* Prevent error on `-sun4' and `-target sun4' options. */
+/* This used to translate -dalign to -malign, but that is no good
+ because it can't turn off the usual meaning of making debugging dumps. */
+/* Translate old style -m<cpu> into new style -mcpu=<cpu>.
+ ??? Delete support for -m<cpu> for 2.9. */
+
+#define CC1_SPEC "\
+%{sun4:} %{target:} \
+%{mcypress:-mcpu=cypress} \
+%{msparclite:-mcpu=sparclite} %{mf930:-mcpu=f930} %{mf934:-mcpu=f934} \
+%{mv8:-mcpu=v8} %{msupersparc:-mcpu=supersparc} \
+"
+
+/* Override in target specific files. */
+#define ASM_CPU_SPEC "\
+%{mcpu=sparclet:-Asparclet} %{mcpu=tsc701:-Asparclet} \
+%{msparclite:-Asparclite} \
+%{mf930:-Asparclite} %{mf934:-Asparclite} \
+%{mcpu=sparclite:-Asparclite} \
+%{mcpu=f930:-Asparclite} %{mcpu=f934:-Asparclite} \
+%{mv8plus:-Av8plus} \
+%{mcpu=v9:-Av9} \
+%{mcpu=ultrasparc:%{!mv8plus:-Av9a}} \
+%{!mcpu*:%{!mcypress:%{!msparclite:%{!mf930:%{!mf934:%{!mv8:%{!msupersparc:%(asm_cpu_default)}}}}}}} \
+"
+
+/* Word size selection, among other things.
+ This is what GAS uses. Add %(asm_arch) to ASM_SPEC to enable. */
+
+#define ASM_ARCH32_SPEC "-32"
+#define ASM_ARCH64_SPEC "-64"
+#define ASM_ARCH_DEFAULT_SPEC \
+(DEFAULT_ARCH32_P ? ASM_ARCH32_SPEC : ASM_ARCH64_SPEC)
+
+#define ASM_ARCH_SPEC "\
+%{m32:%(asm_arch32)} \
+%{m64:%(asm_arch64)} \
+%{!m32:%{!m64:%(asm_arch_default)}} \
+"
+
+/* Special flags to the Sun-4 assembler when using a pipe for input.  */
+
+#define ASM_SPEC "\
+%| %{R} %{!pg:%{!p:%{fpic:-k} %{fPIC:-k}}} %{keep-local-as-symbols:-L} \
+%(asm_cpu) \
+"
+
+#define LIB_SPEC "%{!shared:%{!p:%{!pg:-lc}}%{p:-lc_p}%{pg:-lc_p} %{g:-lg}}"
+
+/* Provide required defaults for linker -e and -d switches. */
+
+#define LINK_SPEC \
+ "%{!shared:%{!nostdlib:%{!r*:%{!e*:-e start}}} -dc -dp} %{static:-Bstatic} \
+ %{assert*} %{shared:%{!mimpure-text:-assert pure-text}}"
+
+/* This macro defines names of additional specifications to put in the specs
+ that can be used in various specifications like CC1_SPEC. Its definition
+ is an initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   specification name, and a string constant that is used by the GNU CC
+   driver program.
+
+ Do not define this macro if it does not need to do anything. */
+
+#define EXTRA_SPECS \
+ { "cpp_cpu", CPP_CPU_SPEC }, \
+ { "cpp_cpu_default", CPP_CPU_DEFAULT_SPEC }, \
+ { "cpp_arch32", CPP_ARCH32_SPEC }, \
+ { "cpp_arch64", CPP_ARCH64_SPEC }, \
+ { "cpp_arch_default", CPP_ARCH_DEFAULT_SPEC }, \
+ { "cpp_arch", CPP_ARCH_SPEC }, \
+ { "cpp_endian", CPP_ENDIAN_SPEC }, \
+ { "cpp_subtarget", CPP_SUBTARGET_SPEC }, \
+ { "asm_cpu", ASM_CPU_SPEC }, \
+ { "asm_cpu_default", ASM_CPU_DEFAULT_SPEC }, \
+ { "asm_arch32", ASM_ARCH32_SPEC }, \
+ { "asm_arch64", ASM_ARCH64_SPEC }, \
+ { "asm_arch_default", ASM_ARCH_DEFAULT_SPEC }, \
+ { "asm_arch", ASM_ARCH_SPEC }, \
+ SUBTARGET_EXTRA_SPECS
+
+#define SUBTARGET_EXTRA_SPECS
+
+#ifdef SPARC_BI_ARCH
+#define NO_BUILTIN_PTRDIFF_TYPE
+#define NO_BUILTIN_SIZE_TYPE
+#endif
+#define PTRDIFF_TYPE (TARGET_ARCH64 ? "long int" : "int")
+#define SIZE_TYPE (TARGET_ARCH64 ? "long unsigned int" : "unsigned int")
+
+/* ??? This should be 32 bits for v9 but what can we do? */
+#define WCHAR_TYPE "short unsigned int"
+#define WCHAR_TYPE_SIZE 16
+#define MAX_WCHAR_TYPE_SIZE 16
+
+/* Show we can debug even without a frame pointer. */
+#define CAN_DEBUG_WITHOUT_FP
+
+/* To make profiling work with -f{pic,PIC}, we need to emit the profiling
+ code into the rtl. Also, if we are profiling, we cannot eliminate
+ the frame pointer (because the return address will get smashed). */
+
+void sparc_override_options ();
+
+#define OVERRIDE_OPTIONS \
+ do { \
+ if (profile_flag || profile_block_flag || profile_arc_flag) \
+ { \
+ if (flag_pic) \
+ { \
+ char *pic_string = (flag_pic == 1) ? "-fpic" : "-fPIC"; \
+ warning ("%s and profiling conflict: disabling %s", \
+ pic_string, pic_string); \
+ flag_pic = 0; \
+ } \
+ flag_omit_frame_pointer = 0; \
+ } \
+ sparc_override_options (); \
+ SUBTARGET_OVERRIDE_OPTIONS; \
+ } while (0)
+
+/* This is meant to be redefined in the host dependent files. */
+#define SUBTARGET_OVERRIDE_OPTIONS
+
+/* These compiler options take an argument. We ignore -target for now. */
+
+#define WORD_SWITCH_TAKES_ARG(STR) \
+ (DEFAULT_WORD_SWITCH_TAKES_ARG (STR) \
+ || !strcmp (STR, "target") || !strcmp (STR, "assert"))
+
+/* Print subsidiary information on the compiler version in use. */
+
+#define TARGET_VERSION fprintf (stderr, " (sparc)");
+
+/* Generate DBX debugging information. */
+
+#define DBX_DEBUGGING_INFO
+
+/* Run-time compilation parameters selecting different hardware subsets. */
+
+extern int target_flags;
+
+/* Nonzero if we should generate code to use the fpu. */
+#define MASK_FPU 1
+#define TARGET_FPU (target_flags & MASK_FPU)
+
+/* Nonzero if we should use FUNCTION_EPILOGUE. Otherwise, we
+ use fast return insns, but lose some generality. */
+#define MASK_EPILOGUE 2
+#define TARGET_EPILOGUE (target_flags & MASK_EPILOGUE)
+
+/* Nonzero if we should assume that double pointers might be unaligned.
+ This can happen when linking gcc compiled code with other compilers,
+ because the ABI only guarantees 4 byte alignment. */
+#define MASK_UNALIGNED_DOUBLES 4
+#define TARGET_UNALIGNED_DOUBLES (target_flags & MASK_UNALIGNED_DOUBLES)
+
+/* Nonzero means that we should generate code for a v8 sparc. */
+#define MASK_V8 0x8
+#define TARGET_V8 (target_flags & MASK_V8)
+
+/* Nonzero means that we should generate code for a sparclite.
+ This enables the sparclite specific instructions, but does not affect
+ whether FPU instructions are emitted. */
+#define MASK_SPARCLITE 0x10
+#define TARGET_SPARCLITE (target_flags & MASK_SPARCLITE)
+
+/* Nonzero if we're compiling for the sparclet. */
+#define MASK_SPARCLET 0x20
+#define TARGET_SPARCLET (target_flags & MASK_SPARCLET)
+
+/* Nonzero if we're compiling for v9 sparc.
+   Note that v9 processors can run in 32 bit mode, so this doesn't
+   necessarily mean the word size is 64.  */
+#define MASK_V9 0x40
+#define TARGET_V9 (target_flags & MASK_V9)
+
+/* Non-zero to generate code that uses the instructions deprecated in
+ the v9 architecture. This option only applies to v9 systems. */
+/* ??? This isn't user selectable yet. It's used to enable such insns
+ on 32 bit v9 systems and for the moment they're permanently disabled
+ on 64 bit v9 systems. */
+#define MASK_DEPRECATED_V8_INSNS 0x80
+#define TARGET_DEPRECATED_V8_INSNS (target_flags & MASK_DEPRECATED_V8_INSNS)
+
+/* Mask of all CPU selection flags. */
+#define MASK_ISA \
+(MASK_V8 + MASK_SPARCLITE + MASK_SPARCLET + MASK_V9 + MASK_DEPRECATED_V8_INSNS)
+
+/* Non-zero means don't pass `-assert pure-text' to the linker. */
+#define MASK_IMPURE_TEXT 0x100
+#define TARGET_IMPURE_TEXT (target_flags & MASK_IMPURE_TEXT)
+
+/* Nonzero means that we should generate code using a flat register window
+ model, i.e. no save/restore instructions are generated, which is
+ compatible with normal sparc code.
+ The frame pointer is %i7 instead of %fp. */
+#define MASK_FLAT 0x200
+#define TARGET_FLAT (target_flags & MASK_FLAT)
+
+/* Nonzero means use the registers that the Sparc ABI reserves for
+ application software. This must be the default to coincide with the
+ setting in FIXED_REGISTERS. */
+#define MASK_APP_REGS 0x400
+#define TARGET_APP_REGS (target_flags & MASK_APP_REGS)
+
+/* Option to select how quad word floating point is implemented.
+ When TARGET_HARD_QUAD is true, we use the hardware quad instructions.
+ Otherwise, we use the SPARC ABI quad library functions. */
+#define MASK_HARD_QUAD 0x800
+#define TARGET_HARD_QUAD (target_flags & MASK_HARD_QUAD)
+
+/* Non-zero on little-endian machines. */
+/* ??? Little endian support currently only exists for sparclet-aout and
+ sparc64-elf configurations. May eventually want to expand the support
+ to all targets, but for now it's kept local to only those two. */
+#define MASK_LITTLE_ENDIAN 0x1000
+#define TARGET_LITTLE_ENDIAN (target_flags & MASK_LITTLE_ENDIAN)
+
+/* 0x2000, 0x4000 are unused */
+
+/* Nonzero if pointers are 64 bits.
+ This is not a user selectable option, though it may be one day -
+ so it is used to determine pointer size instead of an architecture flag. */
+#define MASK_PTR64 0x8000
+#define TARGET_PTR64 (target_flags & MASK_PTR64)
+
+/* Nonzero if generating code to run in a 64 bit environment.
+ This is intended to only be used by TARGET_ARCH{32,64} as they are the
+ mechanism used to control compile time or run time selection. */
+#define MASK_64BIT 0x10000
+#define TARGET_64BIT (target_flags & MASK_64BIT)
+
+/* 0x20000, 0x40000 are unused */
+
+/* Non-zero means use a stack bias of 2047. Stack offsets are obtained by
+ adding 2047 to %sp. This option is for v9 only and is the default. */
+#define MASK_STACK_BIAS 0x80000
+#define TARGET_STACK_BIAS (target_flags & MASK_STACK_BIAS)
+
+/* Non-zero means %g0 is a normal register.
+ We still clobber it as necessary, but we can't rely on it always having
+ a zero value.
+ We don't bother to support this in true 64 bit mode. */
+#define MASK_LIVE_G0 0x100000
+#define TARGET_LIVE_G0 (target_flags & MASK_LIVE_G0)
+
+/* Non-zero means the cpu has broken `save' and `restore' insns, only
+ the trivial versions work (save %g0,%g0,%g0; restore %g0,%g0,%g0).
+ We assume the environment will properly handle or otherwise avoid
+ trouble associated with an interrupt occurring after the `save' or trap
+ occurring during it. */
+#define MASK_BROKEN_SAVERESTORE 0x200000
+#define TARGET_BROKEN_SAVERESTORE (target_flags & MASK_BROKEN_SAVERESTORE)
+
+/* Non-zero means -m{,no-}fpu was passed on the command line. */
+#define MASK_FPU_SET 0x400000
+#define TARGET_FPU_SET (target_flags & MASK_FPU_SET)
+
+/* Use the UltraSPARC Visual Instruction Set extensions. */
+#define MASK_VIS 0x1000000
+#define TARGET_VIS (target_flags & MASK_VIS)
+
+/* Compile for Solaris V8+. 32 bit Solaris preserves the high bits of
+ the current out and global registers. Linux saves the high bits on
+ context switches but not signals. */
+#define MASK_V8PLUS 0x2000000
+#define TARGET_V8PLUS (target_flags & MASK_V8PLUS)
+
+/* TARGET_HARD_MUL: Use hardware multiply instructions but not %y.
+ TARGET_HARD_MUL32: Use hardware multiply instructions with rd %y
+ to get high 32 bits. False in V8+ or V9 because multiply stores
+ a 64 bit result in a register. */
+
+#define TARGET_HARD_MUL32 \
+ ((TARGET_V8 || TARGET_SPARCLITE \
+ || TARGET_SPARCLET || TARGET_DEPRECATED_V8_INSNS) \
+ && ! TARGET_V8PLUS)
+
+#define TARGET_HARD_MUL \
+ (TARGET_V8 || TARGET_SPARCLITE || TARGET_SPARCLET \
+ || TARGET_DEPRECATED_V8_INSNS || TARGET_V8PLUS)
+
+
+/* Macro to define tables used to set the flags.
+ This is a list in braces of pairs in braces,
+ each pair being { "NAME", VALUE }
+ where VALUE is the bits to set or minus the bits to clear.
+ An empty string NAME is used to identify the default VALUE. */
+
+#define TARGET_SWITCHES \
+ { {"fpu", MASK_FPU | MASK_FPU_SET}, \
+ {"no-fpu", -MASK_FPU}, \
+ {"no-fpu", MASK_FPU_SET}, \
+ {"hard-float", MASK_FPU | MASK_FPU_SET}, \
+ {"soft-float", -MASK_FPU}, \
+ {"soft-float", MASK_FPU_SET}, \
+ {"epilogue", MASK_EPILOGUE}, \
+ {"no-epilogue", -MASK_EPILOGUE}, \
+ {"unaligned-doubles", MASK_UNALIGNED_DOUBLES}, \
+ {"no-unaligned-doubles", -MASK_UNALIGNED_DOUBLES}, \
+ {"impure-text", MASK_IMPURE_TEXT}, \
+ {"no-impure-text", -MASK_IMPURE_TEXT}, \
+ {"flat", MASK_FLAT}, \
+ {"no-flat", -MASK_FLAT}, \
+ {"app-regs", MASK_APP_REGS}, \
+ {"no-app-regs", -MASK_APP_REGS}, \
+ {"hard-quad-float", MASK_HARD_QUAD}, \
+ {"soft-quad-float", -MASK_HARD_QUAD}, \
+ {"v8plus", MASK_V8PLUS}, \
+ {"no-v8plus", -MASK_V8PLUS}, \
+ {"vis", MASK_VIS}, \
+ /* ??? These are deprecated, coerced to -mcpu=. Delete in 2.9. */ \
+ {"cypress", 0}, \
+ {"sparclite", 0}, \
+ {"f930", 0}, \
+ {"f934", 0}, \
+ {"v8", 0}, \
+ {"supersparc", 0}, \
+ /* End of deprecated options. */ \
+ /* -mptrNN exists for *experimental* purposes. */ \
+/* {"ptr64", MASK_PTR64}, */ \
+/* {"ptr32", -MASK_PTR64}, */ \
+ {"32", -MASK_64BIT}, \
+ {"64", MASK_64BIT}, \
+ {"stack-bias", MASK_STACK_BIAS}, \
+ {"no-stack-bias", -MASK_STACK_BIAS}, \
+ SUBTARGET_SWITCHES \
+ { "", TARGET_DEFAULT}}
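+
+/* Note that a switch name may appear more than once in the table above:
+   every matching entry is applied in turn, which is how "no-fpu" both
+   clears MASK_FPU and sets MASK_FPU_SET.  */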
+
+/* MASK_APP_REGS must always be the default because that's what
+ FIXED_REGISTERS is set to and -ffixed- is processed before
+ CONDITIONAL_REGISTER_USAGE is called (where we process -mno-app-regs). */
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE + MASK_FPU)
+
+/* This is meant to be redefined in target specific files. */
+#define SUBTARGET_SWITCHES
+
+/* Processor type.
+ These must match the values for the cpu attribute in sparc.md. */
+enum processor_type {
+ PROCESSOR_V7,
+ PROCESSOR_CYPRESS,
+ PROCESSOR_V8,
+ PROCESSOR_SUPERSPARC,
+ PROCESSOR_SPARCLITE,
+ PROCESSOR_F930,
+ PROCESSOR_F934,
+ PROCESSOR_SPARCLET,
+ PROCESSOR_TSC701,
+ PROCESSOR_V9,
+ PROCESSOR_ULTRASPARC
+};
+
+/* This is set from -m{cpu,tune}=xxx. */
+extern enum processor_type sparc_cpu;
+
+/* Recast the cpu class to be the cpu attribute.
+ Every file includes us, but not every file includes insn-attr.h. */
+#define sparc_cpu_attr ((enum attr_cpu) sparc_cpu)
+
+/* This macro is similar to `TARGET_SWITCHES' but defines names of
+ command options that have values. Its definition is an
+ initializer with a subgrouping for each command option.
+
+   Each subgrouping contains a string constant that defines the
+   fixed part of the option name, and the address of a variable.
+ The variable, type `char *', is set to the variable part of the
+ given option if the fixed part matches. The actual option name
+ is made by appending `-m' to the specified name.
+
+ Here is an example which defines `-mshort-data-NUMBER'. If the
+ given option is `-mshort-data-512', the variable `m88k_short_data'
+ will be set to the string `"512"'.
+
+ extern char *m88k_short_data;
+ #define TARGET_OPTIONS { { "short-data-", &m88k_short_data } } */
+
+#define TARGET_OPTIONS \
+{ \
+ { "cpu=", &sparc_select[1].string }, \
+ { "tune=", &sparc_select[2].string }, \
+ { "cmodel=", &sparc_cmodel_string }, \
+ { "align-loops=", &sparc_align_loops_string }, \
+ { "align-jumps=", &sparc_align_jumps_string }, \
+ { "align-functions=", &sparc_align_funcs_string }, \
+ SUBTARGET_OPTIONS \
+}
+
+/* This is meant to be redefined in target specific files. */
+#define SUBTARGET_OPTIONS
+
+/* sparc_select[0] is reserved for the default cpu. */
+struct sparc_cpu_select
+{
+ char *string;
+ char *name;
+ int set_tune_p;
+ int set_arch_p;
+};
+
+extern struct sparc_cpu_select sparc_select[];
+
+/* Variables to record values the user passes. */
+extern char *sparc_align_loops_string;
+extern char *sparc_align_jumps_string;
+extern char *sparc_align_funcs_string;
+/* Parsed values as a power of two. */
+extern int sparc_align_loops;
+extern int sparc_align_jumps;
+extern int sparc_align_funcs;
+
+#define DEFAULT_SPARC_ALIGN_FUNCS \
+(sparc_cpu == PROCESSOR_ULTRASPARC ? 5 : 2)
+
+/* target machine storage layout */
+
+/* Define for cross-compilation to a sparc target with no TFmode from a host
+ with a different float format (e.g. VAX). */
+#define REAL_ARITHMETIC
+
+/* Define this if most significant bit is lowest numbered
+ in instructions that operate on numbered bit-fields. */
+#define BITS_BIG_ENDIAN 1
+
+/* Define this if most significant byte of a word is the lowest numbered. */
+#define BYTES_BIG_ENDIAN 1
+
+/* Define this if most significant word of a multiword number is the lowest
+ numbered. */
+#define WORDS_BIG_ENDIAN 1
+
+/* Define this to set the endianness to use in libgcc2.c, which cannot
+   depend on target_flags.  */
+#if defined (__LITTLE_ENDIAN__)
+#define LIBGCC2_WORDS_BIG_ENDIAN 0
+#else
+#define LIBGCC2_WORDS_BIG_ENDIAN 1
+#endif
+
+/* number of bits in an addressable storage unit */
+#define BITS_PER_UNIT 8
+
+/* Width in bits of a "word", which is the contents of a machine register.
+ Note that this is not necessarily the width of data type `int';
+ if using 16-bit ints on a 68000, this would still be 32.
+ But on a machine with 16-bit registers, this would be 16. */
+#define BITS_PER_WORD (TARGET_ARCH64 ? 64 : 32)
+#define MAX_BITS_PER_WORD 64
+
+/* Width of a word, in units (bytes). */
+#define UNITS_PER_WORD (TARGET_ARCH64 ? 8 : 4)
+#define MIN_UNITS_PER_WORD 4
+
+/* Now define the sizes of the C data types. */
+
+#define SHORT_TYPE_SIZE 16
+#define INT_TYPE_SIZE 32
+#define LONG_TYPE_SIZE (TARGET_ARCH64 ? 64 : 32)
+#define LONG_LONG_TYPE_SIZE 64
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+
+#if defined (SPARC_BI_ARCH)
+#define MAX_LONG_TYPE_SIZE 64
+#endif
+
+#if 0
+/* ??? This does not work in SunOS 4.x, so it is not enabled here.
+ Instead, it is enabled in sol2.h, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 128
+#endif
+
+/* Width in bits of a pointer.
+ See also the macro `Pmode' defined below. */
+#define POINTER_SIZE (TARGET_PTR64 ? 64 : 32)
+
+/* A macro to update MODE and UNSIGNEDP when an object whose type
+ is TYPE and which has the specified mode and signedness is to be
+ stored in a register. This macro is only called when TYPE is a
+ scalar type. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+if (TARGET_ARCH64 \
+ && GET_MODE_CLASS (MODE) == MODE_INT \
+ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \
+{ \
+ (MODE) = DImode; \
+}
+
+/* Define this macro if the promotion described by PROMOTE_MODE
+ should also be done for outgoing function arguments. */
+/* This is only needed for TARGET_ARCH64, but since PROMOTE_MODE is a no-op
+ for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime test
+ for this value. */
+#define PROMOTE_FUNCTION_ARGS
+
+/* Define this macro if the promotion described by PROMOTE_MODE
+ should also be done for the return value of functions.
+ If this macro is defined, FUNCTION_VALUE must perform the same
+ promotions done by PROMOTE_MODE. */
+/* This is only needed for TARGET_ARCH64, but since PROMOTE_MODE is a no-op
+ for TARGET_ARCH32 this is ok. Otherwise we'd need to add a runtime test
+ for this value. */
+#define PROMOTE_FUNCTION_RETURN
+
+/* Allocation boundary (in *bits*) for storing arguments in argument list. */
+#define PARM_BOUNDARY (TARGET_ARCH64 ? 64 : 32)
+
+/* Boundary (in *bits*) on which stack pointer should be aligned. */
+#define STACK_BOUNDARY (TARGET_ARCH64 ? 128 : 64)
+
+/* ALIGN FRAMES on double word boundaries */
+
+#define SPARC_STACK_ALIGN(LOC) \
+ (TARGET_ARCH64 ? (((LOC)+15) & ~15) : (((LOC)+7) & ~7))
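+
+/* For example, on a 32 bit target SPARC_STACK_ALIGN (100) is
+   (100 + 7) & ~7 == 104: sizes are rounded up to the next 8 byte
+   (16 byte on 64 bit targets) boundary.  */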
+
+/* Allocation boundary (in *bits*) for the code of a function. */
+#define FUNCTION_BOUNDARY (1 << (sparc_align_funcs + 3))
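+
+/* E.g. with the UltraSPARC default sparc_align_funcs == 5 (see
+   DEFAULT_SPARC_ALIGN_FUNCS above), this is 1 << 8 == 256 bits, i.e.
+   functions start on 32 byte boundaries.  */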
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY (TARGET_ARCH64 ? 64 : 32)
+
+/* Every structure's size must be a multiple of this. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* A bitfield declared as `int' forces `int' alignment for the struct. */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT (TARGET_ARCH64 ? 128 : 64)
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 64
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Things that must be doubleword aligned cannot go in the text section,
+ because the linker fails to align the text section enough!
+ Put them in the data section. This macro is only used in this file. */
+#define MAX_TEXT_ALIGN 32
+
+/* This forces all variables and constants to the data section when PIC.
+ This is because the SunOS 4 shared library scheme thinks everything in
+ text is a function, and patches the address to point to a loader stub. */
+/* This is defined to zero for every system which doesn't use the a.out object
+ file format. */
+#ifndef SUNOS4_SHARED_LIBRARIES
+#define SUNOS4_SHARED_LIBRARIES 0
+#endif
+
+/* This is defined differently for v9 in a cover file. */
+#define SELECT_SECTION(T,RELOC) \
+{ \
+ if (TREE_CODE (T) == VAR_DECL) \
+ { \
+ if (TREE_READONLY (T) && ! TREE_SIDE_EFFECTS (T) \
+ && DECL_INITIAL (T) \
+ && (DECL_INITIAL (T) == error_mark_node \
+ || TREE_CONSTANT (DECL_INITIAL (T))) \
+ && DECL_ALIGN (T) <= MAX_TEXT_ALIGN \
+ && ! (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES))) \
+ text_section (); \
+ else \
+ data_section (); \
+ } \
+ else if (TREE_CODE (T) == CONSTRUCTOR) \
+ { \
+ if (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES)) \
+ data_section (); \
+ } \
+ else if (TREE_CODE_CLASS (TREE_CODE (T)) == 'c') \
+ { \
+ if ((TREE_CODE (T) == STRING_CST && flag_writable_strings) \
+ || TYPE_ALIGN (TREE_TYPE (T)) > MAX_TEXT_ALIGN \
+ || (flag_pic && ((RELOC) || SUNOS4_SHARED_LIBRARIES))) \
+ data_section (); \
+ else \
+ text_section (); \
+ } \
+}
+
+/* Use text section for a constant
+ unless we need more alignment than that offers. */
+/* This is defined differently for v9 in a cover file. */
+#define SELECT_RTX_SECTION(MODE, X) \
+{ \
+ if (GET_MODE_BITSIZE (MODE) <= MAX_TEXT_ALIGN \
+ && ! (flag_pic && (symbolic_operand (X) || SUNOS4_SHARED_LIBRARIES))) \
+ text_section (); \
+ else \
+ data_section (); \
+}
+
+/* Standard register usage. */
+
+/* Number of actual hardware registers.
+ The hardware registers are assigned numbers for the compiler
+ from 0 to just below FIRST_PSEUDO_REGISTER.
+ All registers that the compiler knows about must be given numbers,
+ even those that are not normally considered general registers.
+
+ SPARC has 32 integer registers and 32 floating point registers.
+ 64 bit SPARC has 32 additional fp regs, but the odd numbered ones are not
+ accessible. We still account for them to simplify register computations
+ (eg: in CLASS_MAX_NREGS). There are also 4 fp condition code registers, so
+ 32+32+32+4 == 100.
+ Register 100 is used as the integer condition code register. */
+
+#define FIRST_PSEUDO_REGISTER 101
+
+#define SPARC_FIRST_FP_REG 32
+/* Additional V9 fp regs. */
+#define SPARC_FIRST_V9_FP_REG 64
+#define SPARC_LAST_V9_FP_REG 95
+/* V9 %fcc[0123]. V8 uses (figuratively) %fcc0. */
+#define SPARC_FIRST_V9_FCC_REG 96
+#define SPARC_LAST_V9_FCC_REG 99
+/* V8 fcc reg. */
+#define SPARC_FCC_REG 96
+/* Integer CC reg. We don't distinguish %icc from %xcc. */
+#define SPARC_ICC_REG 100
+
+/* Nonzero if REGNO is an fp reg. */
+#define SPARC_FP_REG_P(REGNO) \
+((REGNO) >= SPARC_FIRST_FP_REG && (REGNO) <= SPARC_LAST_V9_FP_REG)
+
+/* Argument passing regs. */
+#define SPARC_OUTGOING_INT_ARG_FIRST 8
+#define SPARC_INCOMING_INT_ARG_FIRST (TARGET_FLAT ? 8 : 24)
+#define SPARC_FP_ARG_FIRST 32
+
+/* 1 for registers that have pervasive standard uses
+ and are not available for the register allocator.
+
+ On non-v9 systems:
+ g1 is free to use as temporary.
+ g2-g4 are reserved for applications. Gcc normally uses them as
+ temporaries, but this can be disabled via the -mno-app-regs option.
+ g5 through g7 are reserved for the operating system.
+
+ On v9 systems:
+ g1,g5 are free to use as temporaries, and are free to use between calls
+ if the call is to an external function via the PLT.
+ g4 is free to use as a temporary in the non-embedded case.
+ g4 is reserved in the embedded case.
+ g2-g3 are reserved for applications. Gcc normally uses them as
+ temporaries, but this can be disabled via the -mno-app-regs option.
+ g6-g7 are reserved for the operating system (or application in
+ embedded case).
+ ??? Register 1 is used as a temporary by the 64 bit sethi pattern, so must
+ currently be a fixed register until this pattern is rewritten.
+ Register 1 is also used when restoring call-preserved registers in large
+ stack frames.
+
+ Registers fixed in arch32 and not arch64 (or vice-versa) are marked in
+ CONDITIONAL_REGISTER_USAGE in order to properly handle -ffixed-.
+*/
+
+#define FIXED_REGISTERS \
+ {1, 0, 0, 0, 0, 0, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ \
+ 0, 0, 0, 0, 0}
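+
+/* In the table above the rows are, in order, %g0-%g7 (%g0 is the hardwired
+   zero, %g6/%g7 are reserved for the OS), %o0-%o7 (%o6 is %sp), %l0-%l7,
+   %i0-%i7 (%i6 is %fp, %i7 holds the return address), then %f0-%f31,
+   %f32-%f63, and finally %fcc0-%fcc3 and %icc.  */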
+
+/* 1 for registers not available across function calls.
+ These must include the FIXED_REGISTERS and also any
+ registers that can be used without being saved.
+ The latter must include the registers where values are returned
+ and the register where structure-value addresses are passed.
+ Aside from that, you can include as many other registers as you like. */
+
+#define CALL_USED_REGISTERS \
+ {1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 1, \
+ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ \
+ 1, 1, 1, 1, 1}
+
+/* If !TARGET_FPU, then make the fp registers and fp cc regs fixed so that
+ they won't be allocated. */
+
+#define CONDITIONAL_REGISTER_USAGE \
+do \
+ { \
+ if (TARGET_ARCH32) \
+ { \
+ fixed_regs[5] = 1; \
+ } \
+ else \
+ { \
+ fixed_regs[1] = 1; \
+ } \
+ if (! TARGET_V9) \
+ { \
+ int regno; \
+ for (regno = SPARC_FIRST_V9_FP_REG; \
+ regno <= SPARC_LAST_V9_FP_REG; \
+ regno++) \
+ fixed_regs[regno] = 1; \
+ /* %fcc0 is used by v8 and v9. */ \
+ for (regno = SPARC_FIRST_V9_FCC_REG + 1; \
+ regno <= SPARC_LAST_V9_FCC_REG; \
+ regno++) \
+ fixed_regs[regno] = 1; \
+ } \
+ if (! TARGET_FPU) \
+ { \
+ int regno; \
+ for (regno = 32; regno < SPARC_LAST_V9_FCC_REG; regno++) \
+ fixed_regs[regno] = 1; \
+ } \
+ /* Don't unfix g2-g4 if they were fixed with -ffixed-. */ \
+ fixed_regs[2] |= ! TARGET_APP_REGS; \
+ fixed_regs[3] |= ! TARGET_APP_REGS; \
+ fixed_regs[4] |= ! TARGET_APP_REGS || TARGET_CM_EMBMEDANY; \
+ if (TARGET_FLAT) \
+ { \
+ /* Let the compiler believe the frame pointer is still \
+ %fp, but output it as %i7. */ \
+ fixed_regs[31] = 1; \
+ reg_names[FRAME_POINTER_REGNUM] = "%i7"; \
+ /* ??? This is a hack to disable leaf functions. */ \
+ global_regs[7] = 1; \
+ } \
+ if (profile_block_flag) \
+ { \
+ /* %g1 and %g2 must be fixed, because BLOCK_PROFILER \
+ uses them. */ \
+ fixed_regs[1] = 1; \
+ fixed_regs[2] = 1; \
+ } \
+ if (flag_pic != 0) \
+ { \
+ fixed_regs[23] = 1; \
+ call_used_regs[23] = 1; \
+ } \
+ } \
+while (0)
+
+/* Return number of consecutive hard regs needed starting at reg REGNO
+ to hold something of mode MODE.
+ This is ordinarily the length in words of a value of mode MODE
+ but can be less for certain modes in special long registers.
+
+ On SPARC, ordinary registers hold 32 bits worth;
+ this means both integer and floating point registers.
+ On v9, integer regs hold 64 bits worth; floating point regs hold
+ 32 bits worth (this includes the new fp regs as even the odd ones are
+ included in the hard register count). */
+
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ (TARGET_ARCH64 \
+ ? ((REGNO) < 32 \
+ ? (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD \
+ : (GET_MODE_SIZE (MODE) + 3) / 4) \
+ : ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD))
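+
+/* For instance, on arch64 a DFmode value occupies (8 + 7) / 8 == 1 integer
+   register but (8 + 3) / 4 == 2 float registers, since float regs are
+   counted as 32 bits wide even on v9.  */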
+
+/* A subreg in 64 bit mode will have the wrong offset for a floating point
+ register. The least significant part is at offset 1, compared to 0 for
+ integer registers. */
+#define ALTER_HARD_SUBREG(TMODE, WORD, FMODE, REGNO) \
+ (TARGET_ARCH64 && (REGNO) >= 32 && (REGNO) < 96 && (TMODE) == SImode ? 1 : ((REGNO) + (WORD)))
+
+/* Value is 1 if hard register REGNO can hold a value of machine-mode MODE.
+ See sparc.c for how we initialize this. */
+extern int *hard_regno_mode_classes;
+extern int sparc_mode_class[];
+#define HARD_REGNO_MODE_OK(REGNO, MODE) \
+ ((hard_regno_mode_classes[REGNO] & sparc_mode_class[MODE]) != 0)
+
+/* Value is 1 if it is a good idea to tie two pseudo registers
+ when one has mode MODE1 and one has mode MODE2.
+ If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
+ for any hard reg, then this must be 0 for correct output.
+
+ For V9: SFmode can't be combined with other float modes, because they can't
+ be allocated to the %d registers. Also, DFmode won't fit in odd %f
+ registers, but SFmode will. */
+#define MODES_TIEABLE_P(MODE1, MODE2) \
+ ((MODE1) == (MODE2) \
+ || (GET_MODE_CLASS (MODE1) == GET_MODE_CLASS (MODE2) \
+ && (! TARGET_V9 \
+ || (GET_MODE_CLASS (MODE1) != MODE_FLOAT \
+ || (MODE1 != SFmode && MODE2 != SFmode)))))
+
+/* Specify the registers used for certain standard purposes.
+ The values of these macros are register numbers. */
+
+/* SPARC pc isn't overloaded on a register that the compiler knows about. */
+/* #define PC_REGNUM */
+
+/* Register to use for pushing function arguments. */
+#define STACK_POINTER_REGNUM 14
+
+/* Actual top-of-stack address is 92/176 greater than the contents of the
+ stack pointer register for !v9/v9. That is:
+ - !v9: 64 bytes for the in and local registers, 4 bytes for structure return
+ address, and 6*4 bytes for the 6 register parameters.
+ - v9: 128 bytes for the in and local registers + 6*8 bytes for the integer
+ parameter regs. */
+#define STACK_POINTER_OFFSET FIRST_PARM_OFFSET(0)
+
+/* The stack bias (the amount by which the hardware stack pointer is offset).  */
+#define SPARC_STACK_BIAS ((TARGET_ARCH64 && TARGET_STACK_BIAS) ? 2047 : 0)
+
+/* Is stack biased? */
+#define STACK_BIAS SPARC_STACK_BIAS
+
+/* Base register for access to local variables of the function. */
+#define FRAME_POINTER_REGNUM 30
+
+#if 0
+/* Register that is used for the return address for the flat model. */
+#define RETURN_ADDR_REGNUM 15
+#endif
+
+/* Value should be nonzero if functions must have frame pointers.
+ Zero means the frame pointer need not be set up (and parms
+ may be accessed via the stack pointer) in functions that seem suitable.
+ This is computed in `reload', in reload1.c.
+ Used in flow.c, global.c, and reload1.c.
+
+ Being a non-leaf function does not mean a frame pointer is needed in the
+   flat window model.  However, the debugger won't be able to backtrace
+   through us without it.  */
+#define FRAME_POINTER_REQUIRED \
+ (TARGET_FLAT ? (current_function_calls_alloca || current_function_varargs \
+ || !leaf_function_p ()) \
+ : ! (leaf_function_p () && only_leaf_regs_used ()))
+
+/* C statement to store the difference between the frame pointer
+ and the stack pointer values immediately after the function prologue.
+
+ Note, we always pretend that this is a leaf function because if
+ it's not, there's no point in trying to eliminate the
+ frame pointer. If it is a leaf function, we guessed right! */
+#define INITIAL_FRAME_POINTER_OFFSET(VAR) \
+ ((VAR) = (TARGET_FLAT ? sparc_flat_compute_frame_size (get_frame_size ()) \
+ : compute_frame_size (get_frame_size (), 1)))
+
+/* Base register for access to arguments of the function. */
+#define ARG_POINTER_REGNUM FRAME_POINTER_REGNUM
+
+/* Register in which static-chain is passed to a function. This must
+ not be a register used by the prologue. */
+#define STATIC_CHAIN_REGNUM (TARGET_ARCH64 ? 5 : 2)
+
+/* Register which holds offset table for position-independent
+ data references. */
+
+#define PIC_OFFSET_TABLE_REGNUM 23
+
+#define INITIALIZE_PIC initialize_pic ()
+#define FINALIZE_PIC finalize_pic ()
+
+/* Pick a default value we can notice from override_options:
+ !v9: Default is on.
+ v9: Default is off. */
+
+#define DEFAULT_PCC_STRUCT_RETURN -1
+
+/* Sparc ABI says that quad-precision floats and all structures are returned
+ in memory.
+ For v9: unions <= 32 bytes in size are returned in int regs,
+ structures up to 32 bytes are returned in int and fp regs. */
+
+#define RETURN_IN_MEMORY(TYPE) \
+(TARGET_ARCH32 \
+ ? (TYPE_MODE (TYPE) == BLKmode \
+ || TYPE_MODE (TYPE) == TFmode \
+ || TYPE_MODE (TYPE) == TCmode) \
+ : (TYPE_MODE (TYPE) == BLKmode \
+ && int_size_in_bytes (TYPE) > 32))
+
+/* Functions which return large structures get the address
+ to place the wanted value at offset 64 from the frame.
+ Must reserve 64 bytes for the in and local registers.
+ v9: Functions which return large structures get the address to place the
+ wanted value from an invisible first argument. */
+/* Used only in other #defines in this file. */
+#define STRUCT_VALUE_OFFSET 64
+
+#define STRUCT_VALUE \
+ (TARGET_ARCH64 \
+ ? 0 \
+ : gen_rtx_MEM (Pmode, \
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx, \
+ GEN_INT (STRUCT_VALUE_OFFSET))))
+#define STRUCT_VALUE_INCOMING \
+ (TARGET_ARCH64 \
+ ? 0 \
+ : gen_rtx_MEM (Pmode, \
+ gen_rtx_PLUS (Pmode, frame_pointer_rtx, \
+ GEN_INT (STRUCT_VALUE_OFFSET))))
+
+/* Define the classes of registers for register constraints in the
+ machine description. Also define ranges of constants.
+
+ One of the classes must always be named ALL_REGS and include all hard regs.
+ If there is more than one class, another class must be named NO_REGS
+ and contain no registers.
+
+ The name GENERAL_REGS must be the name of a class (or an alias for
+ another name such as ALL_REGS). This is the class of registers
+ that is allowed by "g" or "r" in a register constraint.
+ Also, registers outside this class are allocated only when
+ instructions express preferences for them.
+
+ The classes must be numbered in nondecreasing order; that is,
+ a larger-numbered class must never be contained completely
+ in a smaller-numbered class.
+
+ For any two classes, it is very desirable that there be another
+ class that represents their union. */
+
+/* The SPARC has various kinds of registers: general, floating point,
+ and condition codes [well, it has others as well, but none that we
+ care directly about].
+
+ For v9 we must distinguish between the upper and lower floating point
+ registers because the upper ones can't hold SFmode values.
+ HARD_REGNO_MODE_OK won't help here because reload assumes that register(s)
+ satisfying a group need for a class will also satisfy a single need for
+ that class. EXTRA_FP_REGS is a bit of a misnomer as it covers all 64 fp
+ regs.
+
+ It is important that one class contains all the general and all the standard
+ fp regs. Otherwise find_reg() won't properly allocate int regs for moves,
+ because reg_class_record() will bias the selection in favor of fp regs,
+ because reg_class_subunion[GENERAL_REGS][FP_REGS] will yield FP_REGS,
+ because FP_REGS > GENERAL_REGS.
+
+ It is also important that one class contain all the general and all the
+ fp regs. Otherwise when spilling a DFmode reg, it may be from EXTRA_FP_REGS
+ but find_reloads() may use class GENERAL_OR_FP_REGS. This will cause
+ allocate_reload_reg() to bypass it causing an abort because the compiler
+ thinks it doesn't have a spill reg when in fact it does.
+
+ v9 also has 4 floating point condition code registers. Since we don't
+ have a class that is the union of FPCC_REGS with either of the others,
+ it is important that it appear first. Otherwise the compiler will die
+ trying to compile _fixunsdfsi because fix_truncdfsi2 won't match its
+ constraints.
+
+ It is important that SPARC_ICC_REG have class NO_REGS. Otherwise combine
+ may try to use it to hold an SImode value. See register_operand.
+ ??? Should %fcc[0123] be handled similarly?
+*/
+
+enum reg_class { NO_REGS, FPCC_REGS, I64_REGS, GENERAL_REGS, FP_REGS,
+ EXTRA_FP_REGS, GENERAL_OR_FP_REGS, GENERAL_OR_EXTRA_FP_REGS,
+ ALL_REGS, LIM_REG_CLASSES };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+/* Give names of register classes as strings for dump file. */
+
+#define REG_CLASS_NAMES \
+ { "NO_REGS", "FPCC_REGS", "I64_REGS", "GENERAL_REGS", "FP_REGS", \
+ "EXTRA_FP_REGS", "GENERAL_OR_FP_REGS", "GENERAL_OR_EXTRA_FP_REGS", \
+ "ALL_REGS" }
+
+/* Define which registers fit in which classes.
+ This is an initializer for a vector of HARD_REG_SET
+ of length N_REG_CLASSES. */
+
+#define REG_CLASS_CONTENTS \
+ {{0, 0, 0, 0}, {0, 0, 0, 0xf}, {0xffff, 0, 0, 0}, \
+ {-1, 0, 0, 0}, {0, -1, 0, 0}, {0, -1, -1, 0}, \
+ {-1, -1, 0, 0}, {-1, -1, -1, 0}, {-1, -1, -1, 0x1f}}
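+
+/* Each initializer above is four 32 bit words, one bit per hard register.
+   E.g. FPCC_REGS is {0, 0, 0, 0xf}: bits 0-3 of the fourth word, i.e.
+   registers 96-99 (%fcc0-%fcc3); ALL_REGS ends in 0x1f so as to also
+   include %icc (register 100).  */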
+
+/* The same information, inverted:
+ Return the class number of the smallest class containing
+ reg number REGNO. This could be a conditional expression
+ or could index an array. */
+
+extern enum reg_class sparc_regno_reg_class[];
+
+#define REGNO_REG_CLASS(REGNO) sparc_regno_reg_class[(REGNO)]
+
+/* This is the order in which to allocate registers normally.
+
+ We put %f0/%f1 last among the float registers, so as to make it more
+ likely that a pseudo-register which dies in the float return register
+ will get allocated to the float return register, thus saving a move
+ instruction at the end of the function. */
+
+#define REG_ALLOC_ORDER \
+{ 8, 9, 10, 11, 12, 13, 2, 3, \
+ 15, 16, 17, 18, 19, 20, 21, 22, \
+ 23, 24, 25, 26, 27, 28, 29, 31, \
+ 34, 35, 36, 37, 38, 39, /* %f2-%f7 */ \
+ 40, 41, 42, 43, 44, 45, 46, 47, /* %f8-%f15 */ \
+ 48, 49, 50, 51, 52, 53, 54, 55, /* %f16-%f23 */ \
+ 56, 57, 58, 59, 60, 61, 62, 63, /* %f24-%f31 */ \
+ 64, 65, 66, 67, 68, 69, 70, 71, /* %f32-%f39 */ \
+ 72, 73, 74, 75, 76, 77, 78, 79, /* %f40-%f47 */ \
+ 80, 81, 82, 83, 84, 85, 86, 87, /* %f48-%f55 */ \
+ 88, 89, 90, 91, 92, 93, 94, 95, /* %f56-%f63 */ \
+ 32, 33, /* %f0,%f1 */ \
+ 96, 97, 98, 99, 100, /* %fcc0-3, %icc */ \
+ 1, 4, 5, 6, 7, 0, 14, 30}
+
+/* This is the order in which to allocate registers for
+ leaf functions. If all registers can fit in the "i" registers,
+ then we have the possibility of having a leaf function. */
+
+#define REG_LEAF_ALLOC_ORDER \
+{ 2, 3, 24, 25, 26, 27, 28, 29, \
+ 15, 8, 9, 10, 11, 12, 13, \
+ 16, 17, 18, 19, 20, 21, 22, 23, \
+ 34, 35, 36, 37, 38, 39, \
+ 40, 41, 42, 43, 44, 45, 46, 47, \
+ 48, 49, 50, 51, 52, 53, 54, 55, \
+ 56, 57, 58, 59, 60, 61, 62, 63, \
+ 64, 65, 66, 67, 68, 69, 70, 71, \
+ 72, 73, 74, 75, 76, 77, 78, 79, \
+ 80, 81, 82, 83, 84, 85, 86, 87, \
+ 88, 89, 90, 91, 92, 93, 94, 95, \
+ 32, 33, \
+ 96, 97, 98, 99, 100, \
+ 1, 4, 5, 6, 7, 0, 14, 30, 31}
+
+#define ORDER_REGS_FOR_LOCAL_ALLOC order_regs_for_local_alloc ()
+
+/* ??? %g7 is made a non-leaf register in order to effectively #undef
+   LEAF_REGISTERS when -mflat is used.  Function only_leaf_regs_used will
+   return 0 if a global register is used and is not permitted in a leaf
+   function.  We make %g7 a global reg if -mflat, and voila.  Since %g7 is
+   a system register and is fixed it won't be used by gcc anyway.  */
+
+#define LEAF_REGISTERS \
+{ 1, 1, 1, 1, 1, 1, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 1, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 0, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1}
+
+extern char leaf_reg_remap[];
+#define LEAF_REG_REMAP(REGNO) (leaf_reg_remap[REGNO])
+
+/* The class value for index registers, and the one for base regs. */
+#define INDEX_REG_CLASS GENERAL_REGS
+#define BASE_REG_CLASS GENERAL_REGS
+
+/* Local macro to handle the two v9 classes of FP regs. */
+#define FP_REG_CLASS_P(CLASS) ((CLASS) == FP_REGS || (CLASS) == EXTRA_FP_REGS)
+
+/* Get reg_class from a letter such as appears in the machine description.
+ In the not-v9 case, coerce v9's 'e' class to 'f', so we can use 'e' in the
+ .md file for v8 and v9.
+ 'd' and 'b' are used for single and double precision VIS operations,
+ if TARGET_VIS.
+ 'h' is used for V8+ 64 bit global and out registers. */
+
+#define REG_CLASS_FROM_LETTER(C) \
+(TARGET_V9 \
+ ? ((C) == 'f' ? FP_REGS \
+ : (C) == 'e' ? EXTRA_FP_REGS \
+ : (C) == 'c' ? FPCC_REGS \
+ : ((C) == 'd' && TARGET_VIS) ? FP_REGS\
+ : ((C) == 'b' && TARGET_VIS) ? EXTRA_FP_REGS\
+ : ((C) == 'h' && TARGET_V8PLUS) ? I64_REGS\
+ : NO_REGS) \
+ : ((C) == 'f' ? FP_REGS \
+ : (C) == 'e' ? FP_REGS \
+ : (C) == 'c' ? FPCC_REGS \
+ : NO_REGS))
+
+/* The letters I, J, K, L and M in a register constraint string
+ can be used to stand for particular ranges of immediate operands.
+ This macro defines what the ranges are.
+ C is the letter, and VALUE is a constant value.
+ Return 1 if VALUE is in the range specified by C.
+
+ `I' is used for the range of constants an insn can actually contain.
+ `J' is used for the range which is just zero (since that is R0).
+ `K' is used for constants which can be loaded with a single sethi insn.
+ `L' is used for the range of constants supported by the movcc insns.
+ `M' is used for the range of constants supported by the movrcc insns. */
+
+#define SPARC_SIMM10_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x200 < 0x400)
+#define SPARC_SIMM11_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x400 < 0x800)
+#define SPARC_SIMM13_P(X) ((unsigned HOST_WIDE_INT) (X) + 0x1000 < 0x2000)
+/* 10 and 11 bit immediates are only used for a few specific insns.
+ SMALL_INT is used throughout the port so we continue to use it. */
+#define SMALL_INT(X) (SPARC_SIMM13_P (INTVAL (X)))
+/* 13 bit immediate, considering only the low 32 bits */
+#define SMALL_INT32(X) (SPARC_SIMM13_P ((int)INTVAL (X) & 0xffffffff))
+#define SPARC_SETHI_P(X) \
+(((unsigned HOST_WIDE_INT) (X) & ~(unsigned HOST_WIDE_INT) 0xfffffc00) == 0)
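+
+/* Examples: SPARC_SIMM13_P accepts -4096..4095, so 4095 satisfies `I'
+   while 4096 does not.  SPARC_SETHI_P requires the low 10 bits to be
+   clear, so 0x12345c00 can be loaded with a single sethi while
+   0x12345c01 cannot.  */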
+
+#define CONST_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'I' ? SPARC_SIMM13_P (VALUE) \
+ : (C) == 'J' ? (VALUE) == 0 \
+ : (C) == 'K' ? SPARC_SETHI_P (VALUE) \
+ : (C) == 'L' ? SPARC_SIMM11_P (VALUE) \
+ : (C) == 'M' ? SPARC_SIMM10_P (VALUE) \
+ : 0)
+
+/* Similar, but for floating constants, and defining letters G and H.
+ Here VALUE is the CONST_DOUBLE rtx itself. */
+
+#define CONST_DOUBLE_OK_FOR_LETTER_P(VALUE, C) \
+ ((C) == 'G' ? fp_zero_operand (VALUE) \
+ : (C) == 'H' ? arith_double_operand (VALUE, DImode) \
+ : 0)
+
+/* Given an rtx X being reloaded into a reg required to be
+ in class CLASS, return the class of reg to actually use.
+ In general this is just CLASS; but on some machines
+ in some cases it is preferable to use a more restrictive class. */
+/* We can't load constants into FP registers. We can't load any FP constant
+ if an 'E' constraint fails to match it. */
+#define PREFERRED_RELOAD_CLASS(X,CLASS) \
+ (CONSTANT_P (X) \
+ && (FP_REG_CLASS_P (CLASS) \
+ || (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ && (HOST_FLOAT_FORMAT != IEEE_FLOAT_FORMAT \
+ || HOST_BITS_PER_INT != BITS_PER_WORD))) \
+ ? NO_REGS : (CLASS))
+
+/* Return the register class of a scratch register needed to load IN into
+ a register of class CLASS in MODE.
+
+ On the SPARC, when PIC, we need a temporary when loading some addresses
+ into a register.
+
+ Also, we need a temporary when loading/storing a HImode/QImode value
+ between memory and the FPU registers. This can happen when combine puts
+ a paradoxical subreg in a float/fix conversion insn. */
+
+#define SECONDARY_INPUT_RELOAD_CLASS(CLASS, MODE, IN) \
+ ((FP_REG_CLASS_P (CLASS) && ((MODE) == HImode || (MODE) == QImode) \
+ && (GET_CODE (IN) == MEM \
+ || ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
+ && true_regnum (IN) == -1))) ? GENERAL_REGS : NO_REGS)
+
+#define SECONDARY_OUTPUT_RELOAD_CLASS(CLASS, MODE, IN) \
+ ((FP_REG_CLASS_P (CLASS) && ((MODE) == HImode || (MODE) == QImode) \
+ && (GET_CODE (IN) == MEM \
+ || ((GET_CODE (IN) == REG || GET_CODE (IN) == SUBREG) \
+ && true_regnum (IN) == -1))) ? GENERAL_REGS : NO_REGS)
+
+/* On SPARC it is not possible to directly move data between
+ GENERAL_REGS and FP_REGS. */
+#define SECONDARY_MEMORY_NEEDED(CLASS1, CLASS2, MODE) \
+ (FP_REG_CLASS_P (CLASS1) != FP_REG_CLASS_P (CLASS2))
+
+/* Return the stack location to use for secondary memory needed reloads.
+ We want to use the reserved location just below the frame pointer.
+ However, we must ensure that there is a frame, so use assign_stack_local
+ if the frame size is zero. */
+#define SECONDARY_MEMORY_NEEDED_RTX(MODE) \
+ (get_frame_size () == 0 \
+ ? assign_stack_local (MODE, GET_MODE_SIZE (MODE), 0) \
+ : gen_rtx_MEM (MODE, gen_rtx_PLUS (Pmode, frame_pointer_rtx, \
+ GEN_INT (STARTING_FRAME_OFFSET))))
+
+/* Get_secondary_mem widens its argument to BITS_PER_WORD which loses on v9
+ because the movsi and movsf patterns don't handle r/f moves.
+ For v8 we copy the default definition. */
+#define SECONDARY_MEMORY_NEEDED_MODE(MODE) \
+ (TARGET_ARCH64 \
+ ? (GET_MODE_BITSIZE (MODE) < 32 \
+ ? mode_for_size (32, GET_MODE_CLASS (MODE), 0) \
+ : MODE) \
+ : (GET_MODE_BITSIZE (MODE) < BITS_PER_WORD \
+ ? mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (MODE), 0) \
+ : MODE))
+
+/* Return the maximum number of consecutive registers
+ needed to represent mode MODE in a register of class CLASS. */
+/* On SPARC, this is the size of MODE in words. */
+#define CLASS_MAX_NREGS(CLASS, MODE) \
+ (FP_REG_CLASS_P (CLASS) ? (GET_MODE_SIZE (MODE) + 3) / 4 \
+ : (GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+/* Stack layout; function entry, exit and calling. */
+
+/* Define the number of registers that can hold parameters.
+ This macro is only used in other macro definitions below and in sparc.c.
+ MODE is the mode of the argument.
+ !v9: All args are passed in %o0-%o5.
+ v9: %o0-%o5 and %f0-%f31 are cumulatively used to pass values.
+ See the description in sparc.c. */
+#define NPARM_REGS(MODE) \
+(TARGET_ARCH64 \
+ ? (GET_MODE_CLASS (MODE) == MODE_FLOAT ? 32 : 6) \
+ : 6)
+
+/* Define this if pushing a word on the stack
+ makes the stack pointer a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+/* Define this if the nominal address of the stack frame
+ is at the high-address end of the local variables;
+ that is, each additional local variable allocated
+ goes at a more negative offset in the frame. */
+#define FRAME_GROWS_DOWNWARD
+
+/* Offset within stack frame to start allocating local variables at.
+ If FRAME_GROWS_DOWNWARD, this is the offset to the END of the
+ first local allocated. Otherwise, it is the offset to the BEGINNING
+ of the first local allocated. */
+/* This allows space for one TFmode floating point value. */
+#define STARTING_FRAME_OFFSET \
+ (TARGET_ARCH64 ? (SPARC_STACK_BIAS - 16) \
+ : (-SPARC_STACK_ALIGN (LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)))
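+
+/* E.g. on arch64 with the default stack bias this is 2047 - 16 == 2031,
+   while on arch32 it is minus the aligned size of one long double.  */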
+
+/* If we generate an insn to push BYTES bytes,
+ this says how many the stack pointer really advances by.
+ On SPARC, don't define this because there are no push insns. */
+/* #define PUSH_ROUNDING(BYTES) */
+
+/* Offset of first parameter from the argument pointer register value.
+ !v9: This is 64 for the ins and locals, plus 4 for the struct-return reg
+ even if this function isn't going to use it.
+ v9: This is 128 for the ins and locals. */
+#define FIRST_PARM_OFFSET(FNDECL) \
+ (TARGET_ARCH64 ? (SPARC_STACK_BIAS + 16 * UNITS_PER_WORD) \
+ : (STRUCT_VALUE_OFFSET + UNITS_PER_WORD))
+
+/* When a parameter is passed in a register, stack space is still
+ allocated for it.
+ !v9: All 6 possible integer registers have backing store allocated.
+ v9: Only space for the arguments passed is allocated. */
+/* ??? Ideally, we'd use zero here (as the minimum), but zero has special
+ meaning to the backend. Further, we need to be able to detect if a
+   varargs/unprototyped function is called, as it may want to spill more
+   registers than we've provided space for.  Ugly, ugly.  So for now we retain
+ all 6 slots even for v9. */
+#define REG_PARM_STACK_SPACE(DECL) (6 * UNITS_PER_WORD)
+
+/* Keep the stack pointer constant throughout the function.
+ This is both an optimization and a necessity: longjmp
+ doesn't behave itself when the stack pointer moves within
+ the function! */
+#define ACCUMULATE_OUTGOING_ARGS
+
+/* Value is the number of bytes of arguments automatically
+ popped when returning from a subroutine call.
+ FUNDECL is the declaration node of the function (as a tree),
+ FUNTYPE is the data type of the function (as a tree),
+ or for a library call it is an identifier node for the subroutine name.
+ SIZE is the number of bytes of arguments passed on the stack. */
+
+#define RETURN_POPS_ARGS(FUNDECL,FUNTYPE,SIZE) 0
+
+/* Some subroutine macros specific to this machine.
+ When !TARGET_FPU, put float return values in the general registers,
+ since we don't have any fp registers. */
+#define BASE_RETURN_VALUE_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 : 8) \
+ : (((MODE) == SFmode || (MODE) == DFmode) && TARGET_FPU ? 32 : 8))
+
+#define BASE_OUTGOING_VALUE_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 \
+ : TARGET_FLAT ? 8 : 24) \
+ : (((MODE) == SFmode || (MODE) == DFmode) && TARGET_FPU ? 32 \
+ : (TARGET_FLAT ? 8 : 24)))
+
+#define BASE_PASSING_ARG_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 : 8) \
+ : 8)
+
+/* ??? FIXME -- seems wrong for v9 structure passing... */
+#define BASE_INCOMING_ARG_REG(MODE) \
+ (TARGET_ARCH64 \
+ ? (TARGET_FPU && FLOAT_MODE_P (MODE) ? 32 \
+ : TARGET_FLAT ? 8 : 24) \
+ : (TARGET_FLAT ? 8 : 24))
+
+/* Define this macro if the target machine has "register windows". This
+ C expression returns the register number as seen by the called function
+ corresponding to register number OUT as seen by the calling function.
+ Return OUT if register number OUT is not an outbound register. */
+
+#define INCOMING_REGNO(OUT) \
+ ((TARGET_FLAT || (OUT) < 8 || (OUT) > 15) ? (OUT) : (OUT) + 16)
+
+/* Define this macro if the target machine has "register windows". This
+ C expression returns the register number as seen by the calling function
+ corresponding to register number IN as seen by the called function.
+ Return IN if register number IN is not an inbound register. */
+
+#define OUTGOING_REGNO(IN) \
+ ((TARGET_FLAT || (IN) < 24 || (IN) > 31) ? (IN) : (IN) - 16)
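+
+/* Example of the window mapping: an argument the caller sets up in %o0
+   (regno 8) is seen by the callee as %i0 (regno 24), so
+   INCOMING_REGNO (8) == 24 and OUTGOING_REGNO (24) == 8; anything
+   outside the %o0-%o7 / %i0-%i7 ranges maps to itself.  */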
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its FUNCTION_DECL;
+ otherwise, FUNC is 0. */
+
+/* On SPARC the value is found in the first "output" register. */
+
+extern struct rtx_def *function_value ();
+#define FUNCTION_VALUE(VALTYPE, FUNC) \
+ function_value ((VALTYPE), TYPE_MODE (VALTYPE), 1)
+
+/* But the called function leaves it in the first "input" register. */
+
+#define FUNCTION_OUTGOING_VALUE(VALTYPE, FUNC) \
+ function_value ((VALTYPE), TYPE_MODE (VALTYPE), 0)
+
+/* Define how to find the value returned by a library function
+ assuming the value has mode MODE. */
+
+#define LIBCALL_VALUE(MODE) \
+ function_value (NULL_TREE, (MODE), 1)
+
+/* 1 if N is a possible register number for a function value
+ as seen by the caller.
+ On SPARC, the first "output" reg is used for integer values,
+ and the first floating point register is used for floating point values. */
+
+#define FUNCTION_VALUE_REGNO_P(N) ((N) == 8 || (N) == 32)
+
+/* Define the size of space to allocate for the return value of an
+ untyped_call. */
+
+#define APPLY_RESULT_SIZE 16
+
+/* 1 if N is a possible register number for function argument passing.
+ On SPARC, these are the "output" registers. v9 also uses %f0-%f31. */
+
+#define FUNCTION_ARG_REGNO_P(N) \
+(TARGET_ARCH64 \
+ ? (((N) >= 8 && (N) <= 13) || ((N) >= 32 && (N) <= 63)) \
+ : ((N) >= 8 && (N) <= 13))
+
+/* Define a data type for recording info about an argument list
+ during the scan of that argument list. This data type should
+ hold all necessary information about the function itself
+ and about the args processed so far, enough to enable macros
+ such as FUNCTION_ARG to determine where the next arg should go.
+
+ On SPARC (!v9), this is a single integer, which is a number of words
+ of arguments scanned so far (including the invisible argument,
+ if any, which holds the structure-value-address).
+ Thus 7 or more means all following args should go on the stack.
+
+ For v9, we also need to know whether a prototype is present. */
+
+struct sparc_args {
+ int words; /* number of words passed so far */
+ int prototype_p; /* non-zero if a prototype is present */
+ int libcall_p; /* non-zero if a library call */
+};
+#define CUMULATIVE_ARGS struct sparc_args
+
+/* Initialize a variable CUM of type CUMULATIVE_ARGS
+ for a call to a function whose data type is FNTYPE.
+ For a library call, FNTYPE is 0. */
+
+extern void init_cumulative_args ();
+#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT) \
+init_cumulative_args (& (CUM), (FNTYPE), (LIBNAME), (INDIRECT));
+
+/* Update the data in CUM to advance over an argument
+ of mode MODE and data type TYPE.
+ TYPE is null for libcalls where that information may not be available. */
+
+extern void function_arg_advance ();
+#define FUNCTION_ARG_ADVANCE(CUM, MODE, TYPE, NAMED) \
+function_arg_advance (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* Determine where to put an argument to a function.
+ Value is zero to push the argument on the stack,
+ or a hard register in which to store the argument.
+
+ MODE is the argument's machine mode.
+ TYPE is the data type of the argument (as a tree).
+ This is null for libcalls where that information may
+ not be available.
+ CUM is a variable of type CUMULATIVE_ARGS which gives info about
+ the preceding args and about the function being called.
+ NAMED is nonzero if this argument is a named parameter
+ (otherwise it is an extra parameter matching an ellipsis). */
+
+extern struct rtx_def *function_arg ();
+#define FUNCTION_ARG(CUM, MODE, TYPE, NAMED) \
+function_arg (& (CUM), (MODE), (TYPE), (NAMED), 0)
+
+/* Define where a function finds its arguments.
+ This is different from FUNCTION_ARG because of register windows. */
+
+#define FUNCTION_INCOMING_ARG(CUM, MODE, TYPE, NAMED) \
+function_arg (& (CUM), (MODE), (TYPE), (NAMED), 1)
+
+/* For an arg passed partly in registers and partly in memory,
+ this is the number of registers used.
+ For args passed entirely in registers or entirely in memory, zero. */
+
+extern int function_arg_partial_nregs ();
+#define FUNCTION_ARG_PARTIAL_NREGS(CUM, MODE, TYPE, NAMED) \
+function_arg_partial_nregs (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* A C expression that indicates when an argument must be passed by reference.
+ If nonzero for an argument, a copy of that argument is made in memory and a
+ pointer to the argument is passed instead of the argument itself.
+ The pointer is passed in whatever way is appropriate for passing a pointer
+ to that type. */
+
+extern int function_arg_pass_by_reference ();
+#define FUNCTION_ARG_PASS_BY_REFERENCE(CUM, MODE, TYPE, NAMED) \
+function_arg_pass_by_reference (& (CUM), (MODE), (TYPE), (NAMED))
+
+/* If defined, a C expression which determines whether, and in which direction,
+ to pad out an argument with extra space. The value should be of type
+ `enum direction': either `upward' to pad above the argument,
+ `downward' to pad below, or `none' to inhibit padding. */
+
+#define FUNCTION_ARG_PADDING(MODE, TYPE) \
+function_arg_padding ((MODE), (TYPE))
+
+/* If defined, a C expression that gives the alignment boundary, in bits,
+ of an argument with the specified mode and type. If it is not defined,
+ PARM_BOUNDARY is used for all arguments.
+ For sparc64, objects requiring 16 byte alignment are passed that way. */
+
+#define FUNCTION_ARG_BOUNDARY(MODE, TYPE) \
+((TARGET_ARCH64 \
+ && (GET_MODE_ALIGNMENT (MODE) == 128 \
+ || ((TYPE) && TYPE_ALIGN (TYPE) == 128))) \
+ ? 128 : PARM_BOUNDARY)
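+
+/* E.g. on sparc64 a `long double' argument (TFmode, 128 bit alignment)
+   is passed on a 16 byte boundary, while an `int' argument falls back
+   to PARM_BOUNDARY.  */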
+
+/* Define the information needed to generate branch and scc insns. This is
+ stored from the compare operation. Note that we can't use "rtx" here
+ since it hasn't been defined! */
+
+extern struct rtx_def *sparc_compare_op0, *sparc_compare_op1;
+
+/* Define the function that build the compare insn for scc and bcc. */
+
+extern struct rtx_def *gen_compare_reg ();
+
+/* This function handles all v9 scc insns.  */
+
+extern int gen_v9_scc ();
+
+/* Generate the special assembly code needed to tell the assembler whatever
+ it might need to know about the return value of a function.
+
+ For Sparc assemblers, we need to output a .proc pseudo-op which conveys
+ information to the assembler relating to peephole optimization (done in
+ the assembler). */
+
+#define ASM_DECLARE_RESULT(FILE, RESULT) \
+ fprintf ((FILE), "\t.proc\t0%lo\n", sparc_type_code (TREE_TYPE (RESULT)))
+
+/* Output the label for a function definition. */
+
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+do { \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+} while (0)
+
+/* This macro generates the assembly code for function entry.
+ FILE is a stdio stream to output the code to.
+ SIZE is an int: how many units of temporary storage to allocate.
+ Refer to the array `regs_ever_live' to determine which registers
+ to save; `regs_ever_live[I]' is nonzero if register number I
+ is ever used in the function. This macro is responsible for
+ knowing which registers should not be saved even if used. */
+
+/* On SPARC, move-double insns between fpu and cpu need an 8-byte block
+ of memory. If any fpu reg is used in the function, we allocate
+ such a block here, at the bottom of the frame, just in case it's needed.
+
+ If this function is a leaf procedure, then we may choose not
+ to do a "save" insn. The decision about whether or not
+ to do this is made in regclass.c. */
+
+extern int leaf_function;
+#define FUNCTION_PROLOGUE(FILE, SIZE) \
+ (TARGET_FLAT ? sparc_flat_output_function_prologue (FILE, (int)SIZE) \
+ : output_function_prologue (FILE, (int)SIZE, leaf_function))
+
+/* Output assembler code to FILE to increment profiler label # LABELNO
+ for profiling a function entry.
+
+ 32 bit sparc uses %g2 as the STATIC_CHAIN_REGNUM which gets clobbered
+ during profiling so we need to save/restore it around the call to mcount.
+ We're guaranteed that a save has just been done, and we use the space
+ allocated for intreg/fpreg value passing. */
+
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ do { \
+ char buf[20]; \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "LP", (LABELNO)); \
+ if (! TARGET_ARCH64) \
+ fputs ("\tst %g2,[%fp-4]\n", FILE); \
+ fputs ("\tsethi %hi(", FILE); \
+ assemble_name (FILE, buf); \
+ fputs ("),%o0\n", FILE); \
+ fputs ("\tcall mcount\n\tadd %o0,%lo(", FILE); \
+ assemble_name (FILE, buf); \
+ fputs ("),%o0\n", FILE); \
+ if (! TARGET_ARCH64) \
+ fputs ("\tld [%fp-4],%g2\n", FILE); \
+ } while (0)
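+
+/* As a rough sketch, for LABELNO == 0 on 32 bit sparc this emits:
+
+     st %g2,[%fp-4]
+     sethi %hi(LP0),%o0
+     call mcount
+     add %o0,%lo(LP0),%o0        ! delay slot
+     ld [%fp-4],%g2
+
+   with the exact label spelling coming from
+   ASM_GENERATE_INTERNAL_LABEL.  */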
+
+/* There are three profiling modes for basic blocks available.
+ The modes are selected at compile time by using the options
+ -a or -ax of the gnu compiler.
+ The variable `profile_block_flag' will be set according to the
+ selected option.
+
+ profile_block_flag == 0, no option used:
+
+ No profiling done.
+
+ profile_block_flag == 1, -a option used.
+
+ Count frequency of execution of every basic block.
+
+ profile_block_flag == 2, -ax option used.
+
+ Generate code to allow several different profiling modes at run time.
+ Available modes are:
+ Produce a trace of all basic blocks.
+ Count frequency of jump instructions executed.
+ In every mode it is possible to start profiling upon entering
+ certain functions and to disable profiling of some other functions.
+
+ The result of basic-block profiling will be written to a file `bb.out'.
+ If the -ax option is used parameters for the profiling will be read
+ from file `bb.in'.
+
+*/
+
+/* The following macro shall output assembler code to FILE
+ to initialize basic-block profiling.
+
+ If profile_block_flag == 2
+
+ Output code to call the subroutine `__bb_init_trace_func'
+ and pass two parameters to it. The first parameter is
+ the address of a block allocated in the object module.
+ The second parameter is the number of the first basic block
+ of the function.
+
+ The name of the block is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ The number of the first basic block of the function is
+ passed to the macro in BLOCK_OR_LABEL.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ parameter1 <- LPBX0
+ parameter2 <- BLOCK_OR_LABEL
+ call __bb_init_trace_func
+
+ else if profile_block_flag != 0
+
+ Output code to call the subroutine `__bb_init_func'
+ and pass one single parameter to it, which is the same
+ as the first parameter to `__bb_init_trace_func'.
+
+ The first word of this parameter is a flag which will be nonzero if
+ the object module has already been initialized. So test this word
+ first, and do not call `__bb_init_func' if the flag is nonzero.
+ Note: When profile_block_flag == 2 the test need not be done
+ but `__bb_init_trace_func' *must* be called.
+
+ BLOCK_OR_LABEL may be used to generate a label number as a
+ branch destination in case `__bb_init_func' will not be called.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ cmp (LPBX0),0
+ jne local_label
+ parameter1 <- LPBX0
+ call __bb_init_func
+local_label:
+
+*/
+
+#define FUNCTION_BLOCK_PROFILER(FILE, BLOCK_OR_LABEL) \
+do \
+ { \
+ int bol = (BLOCK_OR_LABEL); \
+ switch (profile_block_flag) \
+ { \
+ case 2: \
+ fprintf (FILE, "\tsethi %%hi(LPBX0),%%o0\n\tor %%o0,%%lo(LPBX0),%%o0\n\tsethi %%hi(%d),%%o1\n\tcall ___bb_init_trace_func\n\tor %%o1,%%lo(%d),%%o1\n",\
+ bol, bol); \
+ break; \
+ default: \
+ fprintf (FILE, "\tsethi %%hi(LPBX0),%%o0\n\tld [%%lo(LPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne LPY%d\n\tadd %%o0,%%lo(LPBX0),%%o0\n\tcall ___bb_init_func\n\tnop\nLPY%d:\n",\
+ bol, bol); \
+ break; \
+ } \
+ } \
+while (0)
+
+/* The following macro shall output assembler code to FILE
+ to increment a counter associated with basic block number BLOCKNO.
+
+ If profile_block_flag == 2
+
+ Output code to initialize the global structure `__bb' and
+ call the function `__bb_trace_func' which will increment the
+ counter.
+
+ `__bb' consists of two words. In the first word the number
+ of the basic block has to be stored. In the second word
+ the address of a block allocated in the object module
+ has to be stored.
+
+ The basic block number is given by BLOCKNO.
+
+ The address of the block is given by the label created with
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 0);
+
+ by FUNCTION_BLOCK_PROFILER.
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ If described in a virtual assembler language the code to be
+ output looks like:
+
+ move BLOCKNO -> (__bb)
+ move LPBX0 -> (__bb+4)
+ call __bb_trace_func
+
+ Note that function `__bb_trace_func' must not change the
+   machine state, especially the flag register.  To guarantee
+ this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE
+ and MACHINE_STATE_RESTORE. The last two macros will be
+ used in the function `__bb_trace_func', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE.
+
+ else if profile_block_flag != 0
+
+ Output code to increment the counter directly.
+ Basic blocks are numbered separately from zero within each
+ compiled object module. The count associated with block number
+ BLOCKNO is at index BLOCKNO in an array of words; the name of
+ this array is a local symbol made with this statement:
+
+ ASM_GENERATE_INTERNAL_LABEL (BUFFER, "LPBX", 2);
+
+ Of course, since you are writing the definition of
+ `ASM_GENERATE_INTERNAL_LABEL' as well as that of this macro, you
+ can take a short cut in the definition of this macro and use the
+ name that you know will result.
+
+ If described in a virtual assembler language, the code to be
+ output looks like:
+
+ inc (LPBX2+4*BLOCKNO)
+
+*/
+
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+do \
+ { \
+ int blockn = (BLOCKNO); \
+ switch (profile_block_flag) \
+ { \
+ case 2: \
+ fprintf (FILE, "\tsethi %%hi(___bb),%%g1\n\tsethi %%hi(%d),%%g2\n\tor %%g2,%%lo(%d),%%g2\n\tst %%g2,[%%lo(___bb)+%%g1]\n\tsethi %%hi(LPBX0),%%g2\n\tor %%g2,%%lo(LPBX0),%%g2\n\tadd 4,%%g1,%%g1\n\tst %%g2,[%%lo(___bb)+%%g1]\n\tmov %%o7,%%g2\n\tcall ___bb_trace_func\n\tnop\n\tmov %%g2,%%o7\n",\
+ blockn, blockn); \
+ break; \
+ default: \
+ fprintf (FILE, "\tsethi %%hi(LPBX2+%d),%%g1\n\tld [%%lo(LPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(LPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+ break; \
+ } \
+ } \
+while (0)
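+
+/* Decoded, the default (-a) case above increments the counter word at
+   LPBX2 + 4*BLOCKNO:
+
+     sethi %hi(LPBX2+4*N),%g1
+     ld [%lo(LPBX2+4*N)+%g1],%g2
+     add %g2,1,%g2
+     st %g2,[%lo(LPBX2+4*N)+%g1]  */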
+
+/* The following macro shall output assembler code to FILE
+ to indicate a return from function during basic-block profiling.
+
+   If profile_block_flag == 2:
+
+ Output assembler code to call function `__bb_trace_ret'.
+
+ Note that function `__bb_trace_ret' must not change the
+   machine state, especially the flag register.  To guarantee
+ this, you must output code to save and restore registers
+ either in this macro or in the macros MACHINE_STATE_SAVE_RET
+ and MACHINE_STATE_RESTORE_RET. The last two macros will be
+ used in the function `__bb_trace_ret', so you must make
+ sure that the function prologue does not change any
+ register prior to saving it with MACHINE_STATE_SAVE_RET.
+
+   else if profile_block_flag != 0:
+
+ The macro will not be used, so it need not distinguish
+ these cases.
+*/
+
+#define FUNCTION_BLOCK_PROFILER_EXIT(FILE) \
+ fprintf (FILE, "\tcall ___bb_trace_ret\n\tnop\n" );
+
+/* The function `__bb_trace_func' is called in every basic block
+ and is not allowed to change the machine state. Saving (restoring)
+ the state can either be done in the BLOCK_PROFILER macro,
+   before calling function (resp. after returning from function)
+ `__bb_trace_func', or it can be done inside the function by
+ defining the macros:
+
+ MACHINE_STATE_SAVE(ID)
+ MACHINE_STATE_RESTORE(ID)
+
+ In the latter case care must be taken, that the prologue code
+ of function `__bb_trace_func' does not already change the
+ state prior to saving it with MACHINE_STATE_SAVE.
+
+ The parameter `ID' is a string identifying a unique macro use.
+
+ On sparc it is sufficient to save the psw register to memory.
+ Unfortunately the psw register can be read in supervisor mode only,
+ so we read only the condition codes by using branch instructions
+ and hope that this is enough. */
+
+#define MACHINE_STATE_SAVE(ID) \
+ int ms_flags, ms_saveret; \
+ asm volatile( \
+ "mov %%g0,%0\n\
+ be,a LFLGNZ"ID"\n\
+ or %0,4,%0\n\
+LFLGNZ"ID":\n\
+ bcs,a LFLGNC"ID"\n\
+ or %0,1,%0\n\
+LFLGNC"ID":\n\
+ bvs,a LFLGNV"ID"\n\
+ or %0,2,%0\n\
+LFLGNV"ID":\n\
+ bneg,a LFLGNN"ID"\n\
+ or %0,8,%0\n\
+LFLGNN"ID":\n\
+ mov %%g2,%1" \
+ : "=r"(ms_flags), "=r"(ms_saveret));
+
+/* On sparc MACHINE_STATE_RESTORE restores the psw register from memory.
+ The psw register can be written in supervisor mode only,
+ which is true even for simple condition codes.
+ We use some combination of instructions to produce the
+ proper condition codes, but some flag combinations can not
+ be generated in this way. If this happens an unimplemented
+ instruction will be executed to abort the program. */
+
+#define MACHINE_STATE_RESTORE(ID) \
+{ extern char flgtab[] __asm__("LFLGTAB"ID); \
+ int scratch; \
+ asm volatile ( \
+ "jmpl %2+%1,%%g0\n\
+ ! Do part of VC in the delay slot here, as it needs 3 insns.\n\
+ addcc 2,%3,%%g0\n\
+LFLGTAB" ID ":\n\
+ ! 0\n\
+ ba LFLGRET"ID"\n\
+ orcc 1,%%g0,%%g0\n\
+ ! C\n\
+ ba LFLGRET"ID"\n\
+ addcc 2,%3,%%g0\n\
+ ! V\n\
+ unimp\n\
+ nop\n\
+ ! VC\n\
+ ba LFLGRET"ID"\n\
+ addxcc %4,%4,%0\n\
+ ! Z\n\
+ ba LFLGRET"ID"\n\
+ subcc %%g0,%%g0,%%g0\n\
+ ! ZC\n\
+ ba LFLGRET"ID"\n\
+ addcc 1,%3,%0\n\
+ ! ZVC\n\
+ ba LFLGRET"ID"\n\
+ addcc %4,%4,%0\n\
+ ! N\n\
+ ba LFLGRET"ID"\n\
+ orcc %%g0,-1,%%g0\n\
+ ! NC\n\
+ ba LFLGRET"ID"\n\
+ addcc %%g0,%3,%%g0\n\
+ ! NV\n\
+ unimp\n\
+ nop\n\
+ ! NVC\n\
+ unimp\n\
+ nop\n\
+ ! NZ\n\
+ unimp\n\
+ nop\n\
+ ! NZC\n\
+ unimp\n\
+ nop\n\
+ ! NZV\n\
+ unimp\n\
+ nop\n\
+ ! NZVC\n\
+ unimp\n\
+ nop\n\
+LFLGRET"ID":\n\
+ mov %5,%%g2" \
+ : "=r"(scratch) \
+ : "r"(ms_flags*8), "r"(flgtab), "r"(-1), \
+ "r"(0x80000000), "r"(ms_saveret) \
+ : "cc", "%g2"); }
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+extern int current_function_calls_alloca;
+extern int current_function_outgoing_args_size;
+
+#define EXIT_IGNORE_STACK \
+ (get_frame_size () != 0 \
+ || current_function_calls_alloca || current_function_outgoing_args_size)
+
+/* This macro generates the assembly code for function exit,
+ on machines that need it. If FUNCTION_EPILOGUE is not defined
+ then individual return instructions are generated for each
+ return statement. Args are same as for FUNCTION_PROLOGUE.
+
+ The function epilogue should not depend on the current stack pointer!
+ It should use the frame pointer only. This is mandatory because
+ of alloca; we also take advantage of it to omit stack adjustments
+ before returning. */
+
+/* This declaration is needed due to traditional/ANSI
+ incompatibilities which cannot be #ifdefed away
+ because they occur inside of macros. Sigh. */
+extern union tree_node *current_function_decl;
+
+#define FUNCTION_EPILOGUE(FILE, SIZE) \
+ (TARGET_FLAT ? sparc_flat_output_function_epilogue (FILE, (int)SIZE) \
+ : output_function_epilogue (FILE, (int)SIZE, leaf_function))
+
+#define DELAY_SLOTS_FOR_EPILOGUE \
+ (TARGET_FLAT ? sparc_flat_epilogue_delay_slots () : 1)
+#define ELIGIBLE_FOR_EPILOGUE_DELAY(trial, slots_filled) \
+ (TARGET_FLAT ? sparc_flat_eligible_for_epilogue_delay (trial, slots_filled) \
+ : eligible_for_epilogue_delay (trial, slots_filled))
+
+/* Define registers used by the epilogue and return instruction. */
+#define EPILOGUE_USES(REGNO) \
+  (! TARGET_FLAT && (REGNO) == 31)
+
+/* Length in units of the trampoline for entering a nested function. */
+
+#define TRAMPOLINE_SIZE (TARGET_ARCH64 ? 32 : 16)
+
+#define TRAMPOLINE_ALIGNMENT 128 /* 16 bytes */
+
+/* Emit RTL insns to initialize the variable parts of a trampoline.
+ FNADDR is an RTX for the address of the function's pure code.
+ CXT is an RTX for the static chain value for the function. */
+
+void sparc_initialize_trampoline ();
+void sparc64_initialize_trampoline ();
+#define INITIALIZE_TRAMPOLINE(TRAMP, FNADDR, CXT) \
+ if (TARGET_ARCH64) \
+ sparc64_initialize_trampoline (TRAMP, FNADDR, CXT); \
+ else \
+ sparc_initialize_trampoline (TRAMP, FNADDR, CXT)
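+
+/* An illustrative trigger (GNU C, with `apply' a hypothetical function
+   taking a function pointer):
+
+     int outer (int x)
+     {
+       int inner (int y) { return x + y; }
+       return apply (inner);
+     }
+
+   Taking `inner's address makes the compiler build a trampoline on the
+   stack and call one of the routines above to patch in FNADDR and the
+   static chain CXT.  */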
+
+/* Generate necessary RTL for __builtin_saveregs().
+ ARGLIST is the argument list; see expr.c. */
+
+extern struct rtx_def *sparc_builtin_saveregs ();
+#define EXPAND_BUILTIN_SAVEREGS(ARGLIST) sparc_builtin_saveregs (ARGLIST)
+
+/* Define this macro if the location where a function argument is passed
+ depends on whether or not it is a named argument.
+
+ This macro controls how the NAMED argument to FUNCTION_ARG
+ is set for varargs and stdarg functions. With this macro defined,
+ the NAMED argument is always true for named arguments, and false for
+ unnamed arguments. If this is not defined, but SETUP_INCOMING_VARARGS
+ is defined, then all arguments are treated as named. Otherwise, all named
+ arguments except the last are treated as named.
+ For the v9 we want NAMED to mean what it says it means. */
+
+#define STRICT_ARGUMENT_NAMING TARGET_V9
+
+/* Generate RTL to flush the register windows so as to make arbitrary frames
+ available. */
+#define SETUP_FRAME_ADDRESSES() \
+ emit_insn (gen_flush_register_windows ())
+
+/* Given an rtx for the address of a frame,
+ return an rtx for the address of the word in the frame
+ that holds the dynamic chain--the previous frame's address.
+ ??? -mflat support? */
+#define DYNAMIC_CHAIN_ADDRESS(frame) \
+ gen_rtx_PLUS (Pmode, frame, GEN_INT (14 * UNITS_PER_WORD))
+
+/* The return address isn't on the stack, it is in a register, so we can't
+ access it from the current frame pointer. We can access it from the
+ previous frame pointer though by reading a value from the register window
+ save area. */
+#define RETURN_ADDR_IN_PREVIOUS_FRAME
+
+/* This is the offset of the return address to the true next instruction to be
+ executed for the current function. */
+#define RETURN_ADDR_OFFSET \
+ (8 + 4 * (! TARGET_ARCH64 && current_function_returns_struct))
+
+/* The current return address is in %i7. The return address of anything
+ farther back is in the register window save area at [%fp+60]. */
+/* ??? This ignores the fact that the actual return address is +8 for normal
+ returns, and +12 for structure returns. */
+#define RETURN_ADDR_RTX(count, frame) \
+ ((count == -1) \
+ ? gen_rtx_REG (Pmode, 31) \
+ : gen_rtx_MEM (Pmode, \
+ memory_address (Pmode, plus_constant (frame, 15 * UNITS_PER_WORD))))
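+
+/* For example, on 32 bit sparc (UNITS_PER_WORD == 4) a nonnegative
+   COUNT reads the saved %i7 slot at [frame + 60], matching the window
+   save area layout described above.  */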
+
+/* Before the prologue, the return address is %o7 + 8. OK, sometimes it's
+ +12, but always using +8 is close enough for frame unwind purposes.
+ Actually, just using %o7 is close enough for unwinding, but %o7+8
+ is something you can return to. */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_rtx_PLUS (word_mode, gen_rtx_REG (word_mode, 15), GEN_INT (8))
+
+/* The offset from the incoming value of %sp to the top of the stack frame
+ for the current function. On sparc64, we have to account for the stack
+ bias if present. */
+#define INCOMING_FRAME_SP_OFFSET SPARC_STACK_BIAS
+
+#define DOESNT_NEED_UNWINDER (! TARGET_FLAT)
+
+/* Addressing modes, and classification of registers for them. */
+
+/* #define HAVE_POST_INCREMENT */
+/* #define HAVE_POST_DECREMENT */
+
+/* #define HAVE_PRE_DECREMENT */
+/* #define HAVE_PRE_INCREMENT */
+
+/* Macros to check register numbers against specific register classes. */
+
+/* These assume that REGNO is a hard or pseudo reg number.
+ They give nonzero only if REGNO is a hard reg of the suitable class
+ or a pseudo reg currently allocated to a suitable hard reg.
+ Since they use reg_renumber, they are safe only once reg_renumber
+ has been allocated, which happens in local-alloc.c. */
+
+#define REGNO_OK_FOR_INDEX_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
+#define REGNO_OK_FOR_BASE_P(REGNO) \
+((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < (unsigned)32)
+#define REGNO_OK_FOR_FP_P(REGNO) \
+ (((unsigned) (REGNO) - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)) \
+ || ((unsigned) reg_renumber[REGNO] - 32 < (TARGET_V9 ? (unsigned)64 : (unsigned)32)))
+#define REGNO_OK_FOR_CCFP_P(REGNO) \
+ (TARGET_V9 \
+ && (((unsigned) (REGNO) - 96 < (unsigned)4) \
+ || ((unsigned) reg_renumber[REGNO] - 96 < (unsigned)4)))
+
+/* Now macros that check whether X is a register and also,
+ strictly, whether it is in a specified class.
+
+ These macros are specific to the SPARC, and may be used only
+ in code for printing assembler insns and in conditions for
+ define_optimization. */
+
+/* 1 if X is an fp register. */
+
+#define FP_REG_P(X) (REG_P (X) && REGNO_OK_FOR_FP_P (REGNO (X)))
+
+/* Is X, a REG, an `in' or global register, i.e. regno 0..7 or 24..31?  */
+#define IN_OR_GLOBAL_P(X) (REGNO (X) < 8 || (REGNO (X) >= 24 && REGNO (X) <= 31))
+
+/* Maximum number of registers that can appear in a valid memory address. */
+
+#define MAX_REGS_PER_ADDRESS 2
+
+/* Recognize any constant value that is a valid address.
+ When PIC, we do not accept an address that would require a scratch reg
+ to load into a register. */
+
+#define CONSTANT_ADDRESS_P(X) \
+ (GET_CODE (X) == LABEL_REF || GET_CODE (X) == SYMBOL_REF \
+ || GET_CODE (X) == CONST_INT || GET_CODE (X) == HIGH \
+ || (GET_CODE (X) == CONST \
+ && ! (flag_pic && pic_address_needs_scratch (X))))
+
+/* Define this, so that when PIC, reload won't try to reload invalid
+ addresses which require two reload registers. */
+
+#define LEGITIMATE_PIC_OPERAND_P(X) (! pic_address_needs_scratch (X))
+
+/* Nonzero if the constant value X is a legitimate general operand.
+ Anything can be made to work except floating point constants. */
+
+#define LEGITIMATE_CONSTANT_P(X) \
+ (GET_CODE (X) != CONST_DOUBLE || GET_MODE (X) == VOIDmode)
+
+/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx
+ and check its validity for a certain class.
+ We have two alternate definitions for each of them.
+ The usual definition accepts all pseudo regs; the other rejects
+ them unless they have been allocated suitable hard regs.
+ The symbol REG_OK_STRICT causes the latter definition to be used.
+
+ Most source files want to accept pseudo regs in the hope that
+ they will get allocated to the class that the insn wants them to be in.
+ Source files for reload pass need to be strict.
+ After reload, it makes no difference, since pseudo regs have
+ been eliminated by then. */
+
+/* Optional extra constraints for this machine. Borrowed from romp.h.
+
+ For the SPARC, `Q' means that this is a memory operand but not a
+ symbolic memory operand. Note that an unassigned pseudo register
+ is such a memory operand. Needed because reload will generate
+ these things in insns and then not re-recognize the insns, causing
+ constrain_operands to fail.
+
+ `S' handles constraints for calls. ??? So where is it? */
+
+#ifndef REG_OK_STRICT
+
+/* Nonzero if X is a hard reg that can be used as an index
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_INDEX_P(X) \
+ (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+/* Nonzero if X is a hard reg that can be used as a base reg
+ or if it is a pseudo reg. */
+#define REG_OK_FOR_BASE_P(X) \
+ (((unsigned) REGNO (X)) - 32 >= (FIRST_PSEUDO_REGISTER - 32))
+
+/* 'T', 'U' are for aligned memory loads which aren't needed for v9. */
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' \
+ ? ((GET_CODE (OP) == MEM \
+ && memory_address_p (GET_MODE (OP), XEXP (OP, 0)) \
+ && ! symbolic_memory_operand (OP, VOIDmode)) \
+ || (reload_in_progress && GET_CODE (OP) == REG \
+ && REGNO (OP) >= FIRST_PSEUDO_REGISTER)) \
+ : (! TARGET_ARCH64 && (C) == 'T') \
+ ? (mem_aligned_8 (OP)) \
+ : (! TARGET_ARCH64 && (C) == 'U') \
+ ? (register_ok_for_ldd (OP)) \
+ : 0)
+
+#else
+
+/* Nonzero if X is a hard reg that can be used as an index. */
+#define REG_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_P (REGNO (X))
+/* Nonzero if X is a hard reg that can be used as a base reg. */
+#define REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X))
+
+#define EXTRA_CONSTRAINT(OP, C) \
+ ((C) == 'Q' \
+ ? (GET_CODE (OP) == REG \
+ ? (REGNO (OP) >= FIRST_PSEUDO_REGISTER \
+ && reg_renumber[REGNO (OP)] < 0) \
+ : GET_CODE (OP) == MEM) \
+ : (! TARGET_ARCH64 && (C) == 'T') \
+ ? mem_aligned_8 (OP) && strict_memory_address_p (Pmode, XEXP (OP, 0)) \
+ : (! TARGET_ARCH64 && (C) == 'U') \
+ ? (GET_CODE (OP) == REG \
+ && (REGNO (OP) < FIRST_PSEUDO_REGISTER \
+ || reg_renumber[REGNO (OP)] >= 0) \
+ && register_ok_for_ldd (OP)) \
+ : 0)
+#endif
+
+/* GO_IF_LEGITIMATE_ADDRESS recognizes an RTL expression
+ that is a valid memory address for an instruction.
+ The MODE argument is the machine mode for the MEM expression
+ that wants to use this address.
+
+ On SPARC, the actual legitimate addresses must be REG+REG or REG+SMALLINT
+ ordinarily. This changes a bit when generating PIC.
+
+ If you change this, execute "rm explow.o recog.o reload.o". */
+
+#define RTX_OK_FOR_BASE_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_BASE_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_BASE_P (SUBREG_REG (X))))
+
+#define RTX_OK_FOR_INDEX_P(X) \
+ ((GET_CODE (X) == REG && REG_OK_FOR_INDEX_P (X)) \
+ || (GET_CODE (X) == SUBREG \
+ && GET_CODE (SUBREG_REG (X)) == REG \
+ && REG_OK_FOR_INDEX_P (SUBREG_REG (X))))
+
+#define RTX_OK_FOR_OFFSET_P(X) \
+ (GET_CODE (X) == CONST_INT && INTVAL (X) >= -0x1000 && INTVAL (X) < 0x1000)
+
+#define GO_IF_LEGITIMATE_ADDRESS(MODE, X, ADDR) \
+{ if (RTX_OK_FOR_BASE_P (X)) \
+ goto ADDR; \
+ else if (GET_CODE (X) == PLUS) \
+ { \
+ register rtx op0 = XEXP (X, 0); \
+ register rtx op1 = XEXP (X, 1); \
+ if (flag_pic && op0 == pic_offset_table_rtx) \
+ { \
+ if (RTX_OK_FOR_BASE_P (op1)) \
+ goto ADDR; \
+ else if (flag_pic == 1 \
+ && GET_CODE (op1) != REG \
+ && GET_CODE (op1) != LO_SUM \
+ && GET_CODE (op1) != MEM \
+ && (GET_CODE (op1) != CONST_INT \
+ || SMALL_INT (op1))) \
+ goto ADDR; \
+ } \
+ else if (RTX_OK_FOR_BASE_P (op0)) \
+ { \
+ if (RTX_OK_FOR_INDEX_P (op1) \
+ || RTX_OK_FOR_OFFSET_P (op1)) \
+ goto ADDR; \
+ } \
+ else if (RTX_OK_FOR_BASE_P (op1)) \
+ { \
+ if (RTX_OK_FOR_INDEX_P (op0) \
+ || RTX_OK_FOR_OFFSET_P (op0)) \
+ goto ADDR; \
+ } \
+ } \
+ else if (GET_CODE (X) == LO_SUM) \
+ { \
+ register rtx op0 = XEXP (X, 0); \
+ register rtx op1 = XEXP (X, 1); \
+ if (RTX_OK_FOR_BASE_P (op0) \
+ && CONSTANT_P (op1) \
+ /* We can't allow TFmode, because an offset \
+ greater than or equal to the alignment (8) \
+ may cause the LO_SUM to overflow. */ \
+ && MODE != TFmode) \
+ goto ADDR; \
+ } \
+ else if (GET_CODE (X) == CONST_INT && SMALL_INT (X)) \
+ goto ADDR; \
+}
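+
+/* Examples of addresses accepted above in the non-PIC case:
+
+     [%g1]               REG
+     [%g1+%g2]           REG+REG
+     [%fp-8]             REG+SMALLINT (13 bit signed offset)
+     [%g1+%lo(sym)]      LO_SUM, the second half of the sethi idiom
+
+   [%g1+0x2000] is rejected because 0x2000 does not fit in 13 signed
+   bits and must first be loaded into a register.  */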
+
+/* Try machine-dependent ways of modifying an illegitimate address
+ to be legitimate. If we find one, return the new, valid address.
+ This macro is used in only one place: `memory_address' in explow.c.
+
+ OLDX is the address as it was before break_out_memory_refs was called.
+ In some cases it is useful to look at this to decide what needs to be done.
+
+ MODE and WIN are passed so that this macro can use
+ GO_IF_LEGITIMATE_ADDRESS.
+
+ It is always safe for this macro to do nothing. It exists to recognize
+ opportunities to optimize the output. */
+
+/* On SPARC, change REG+N into REG+REG, and REG+(X*Y) into REG+REG. */
+extern struct rtx_def *legitimize_pic_address ();
+#define LEGITIMIZE_ADDRESS(X,OLDX,MODE,WIN) \
+{ rtx sparc_x = (X); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == MULT) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 1), \
+ force_operand (XEXP (X, 0), NULL_RTX)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == MULT) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), NULL_RTX)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 0)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, force_operand (XEXP (X, 0), NULL_RTX),\
+ XEXP (X, 1)); \
+ if (GET_CODE (X) == PLUS && GET_CODE (XEXP (X, 1)) == PLUS) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ force_operand (XEXP (X, 1), NULL_RTX)); \
+ if (sparc_x != (X) && memory_address_p (MODE, X)) \
+ goto WIN; \
+ if (flag_pic) (X) = legitimize_pic_address (X, MODE, 0); \
+ else if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 1))) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 0), \
+ copy_to_mode_reg (Pmode, XEXP (X, 1))); \
+ else if (GET_CODE (X) == PLUS && CONSTANT_ADDRESS_P (XEXP (X, 0))) \
+ (X) = gen_rtx_PLUS (Pmode, XEXP (X, 1), \
+ copy_to_mode_reg (Pmode, XEXP (X, 0))); \
+ else if (GET_CODE (X) == SYMBOL_REF || GET_CODE (X) == CONST \
+ || GET_CODE (X) == LABEL_REF) \
+ (X) = gen_rtx_LO_SUM (Pmode, \
+ copy_to_mode_reg (Pmode, gen_rtx_HIGH (Pmode, X)), X); \
+ if (memory_address_p (MODE, X)) \
+ goto WIN; }
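+
+/* A sketch of the effect: a bare SYMBOL_REF address `sym' is rewritten
+   by the last arm into LO_SUM (HIGH (sym), sym), which assembles to the
+   canonical two-insn form
+
+     sethi %hi(sym),%reg
+     ... [%reg+%lo(sym)] ...
+
+   while REG+BIGCONST forms get the constant copied into a register to
+   yield REG+REG.  */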
+
+/* Go to LABEL if ADDR (a legitimate address expression)
+ has an effect that depends on the machine mode it is used for.
+ On the SPARC this is never true. */
+
+#define GO_IF_MODE_DEPENDENT_ADDRESS(ADDR,LABEL)
+
+/* If we are referencing a function make the SYMBOL_REF special.
+ In the Embedded Medium/Anywhere code model, %g4 points to the data segment
+ so we must not add it to function addresses. */
+
+#define ENCODE_SECTION_INFO(DECL) \
+ do { \
+ if (TARGET_CM_EMBMEDANY && TREE_CODE (DECL) == FUNCTION_DECL) \
+ SYMBOL_REF_FLAG (XEXP (DECL_RTL (DECL), 0)) = 1; \
+ } while (0)
+
+/* Specify the machine mode that this machine uses
+ for the index in the tablejump instruction. */
+#define CASE_VECTOR_MODE Pmode
+
+/* Define as C expression which evaluates to nonzero if the tablejump
+ instruction expects the table to contain offsets from the address of the
+ table.
+ Do not define this if the table should contain absolute addresses. */
+/* #define CASE_VECTOR_PC_RELATIVE 1 */
+
+/* Specify the tree operation to be used to convert reals to integers. */
+#define IMPLICIT_FIX_EXPR FIX_ROUND_EXPR
+
+/* This is the kind of divide that is easiest to do in the general case. */
+#define EASY_DIV_EXPR TRUNC_DIV_EXPR
+
+/* Define this as 1 if `char' should by default be signed; else as 0. */
+#define DEFAULT_SIGNED_CHAR 1
+
+/* Max number of bytes we can move from memory to memory
+ in one reasonably fast instruction. */
+#define MOVE_MAX 8
+
+#if 0 /* Sun 4 has matherr, so this is no good. */
+/* This is the value of the error code EDOM for this machine,
+ used by the sqrt instruction. */
+#define TARGET_EDOM 33
+
+/* This is how to refer to the variable errno. */
+#define GEN_ERRNO_RTX \
+ gen_rtx_MEM (SImode, gen_rtx_SYMBOL_REF (Pmode, "errno"))
+#endif /* 0 */
+
+/* Define if operations between registers always perform the operation
+ on the full register even if a narrower mode is specified. */
+#define WORD_REGISTER_OPERATIONS
+
+/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD
+ will either zero-extend or sign-extend. The value of this macro should
+ be the code that says which one of the two operations is implicitly
+ done, NIL if none. */
+#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND
+
+/* Nonzero if access to memory by bytes is slow and undesirable.
+ For RISC chips, it means that access to memory by bytes is no
+ better than access by words when possible, so grab a whole word
+ and maybe make use of that. */
+#define SLOW_BYTE_ACCESS 1
+
+/* We assume that the store-condition-codes instructions store 0 for false
+ and some other value for true. This is the value stored for true. */
+
+#define STORE_FLAG_VALUE 1
+
+/* When a prototype says `char' or `short', really pass an `int'. */
+#define PROMOTE_PROTOTYPES
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* Value is 1 if truncating an integer of INPREC bits to OUTPREC bits
+ is done just by pretending it is already truncated. */
+#define TRULY_NOOP_TRUNCATION(OUTPREC, INPREC) 1
+
+/* Specify the machine mode that pointers have.
+ After generation of rtl, the compiler makes no further distinction
+ between pointers and any other objects of this machine mode. */
+#define Pmode (TARGET_PTR64 ? DImode : SImode)
+
+/* Generate calls to memcpy, memcmp and memset. */
+#define TARGET_MEM_FUNCTIONS
+
+/* Add any extra modes needed to represent the condition code.
+
+ On the Sparc, we have a "no-overflow" mode which is used when an add or
+ subtract insn is used to set the condition code. Different branches are
+ used in this case for some operations.
+
+ We also have two modes to indicate that the relevant condition code is
+ in the floating-point condition code register. One for comparisons which
+ will generate an exception if the result is unordered (CCFPEmode) and
+ one for comparisons which will never trap (CCFPmode).
+
+ CCXmode and CCX_NOOVmode are only used by v9. */
+
+#define EXTRA_CC_MODES CCXmode, CC_NOOVmode, CCX_NOOVmode, CCFPmode, CCFPEmode
+
+/* Define the names for the modes specified above. */
+
+#define EXTRA_CC_NAMES "CCX", "CC_NOOV", "CCX_NOOV", "CCFP", "CCFPE"
+
+/* Given a comparison code (EQ, NE, etc.) and the first operand of a COMPARE,
+ return the mode to be used for the comparison. For floating-point,
+ CCFP[E]mode is used. CC_NOOVmode should be used when the first operand is a
+ PLUS, MINUS, NEG, or ASHIFT. CCmode should be used when no special
+ processing is needed. */
+#define SELECT_CC_MODE(OP,X,Y) \
+ (GET_MODE_CLASS (GET_MODE (X)) == MODE_FLOAT \
+ ? ((OP == EQ || OP == NE) ? CCFPmode : CCFPEmode) \
+ : ((GET_CODE (X) == PLUS || GET_CODE (X) == MINUS \
+ || GET_CODE (X) == NEG || GET_CODE (X) == ASHIFT) \
+ ? (TARGET_ARCH64 && GET_MODE (X) == DImode ? CCX_NOOVmode : CC_NOOVmode) \
+ : ((TARGET_ARCH64 || TARGET_V8PLUS) && GET_MODE (X) == DImode ? CCXmode : CCmode)))
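+
+/* Worked examples: comparing `(a + b) != 0' puts a PLUS in X, selecting
+   CC_NOOVmode (CCX_NOOVmode for DImode on v9); a float `x < y' selects
+   CCFPEmode, since LT must trap on unordered operands, while float
+   equality uses the non-trapping CCFPmode; a plain SImode `a == b'
+   gets CCmode.  */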
+
+/* Return non-zero if SELECT_CC_MODE will never return MODE for a
+ floating point inequality comparison. */
+
+#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPEmode)
+
+/* A function address in a call instruction
+ is a byte address (for indexing purposes)
+ so give the MEM rtx a byte's mode. */
+#define FUNCTION_MODE SImode
+
+/* Define this if addresses of constant functions
+ shouldn't be put through pseudo regs where they can be cse'd.
+ Desirable on machines where ordinary constants are expensive
+ but a CALL with constant address is cheap. */
+#define NO_FUNCTION_CSE
+
+/* alloca should avoid clobbering the old register save area. */
+#define SETJMP_VIA_SAVE_AREA
+
+/* Define subroutines to call to handle multiply and divide.
+ Use the subroutines that Sun's library provides.
+ The `*' prevents an underscore from being prepended by the compiler. */
+
+#define DIVSI3_LIBCALL "*.div"
+#define UDIVSI3_LIBCALL "*.udiv"
+#define MODSI3_LIBCALL "*.rem"
+#define UMODSI3_LIBCALL "*.urem"
+/* .umul is a little faster than .mul. */
+#define MULSI3_LIBCALL "*.umul"
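+
+/* E.g. a 32 bit signed division `a / b' is emitted as `call .div',
+   with the operands in %o0/%o1 and the result in %o0 per the Sun
+   library convention.  */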
+
+/* Define library calls for quad FP operations. These are all part of the
+ SPARC ABI. */
+#define ADDTF3_LIBCALL "_Q_add"
+#define SUBTF3_LIBCALL "_Q_sub"
+#define NEGTF2_LIBCALL "_Q_neg"
+#define MULTF3_LIBCALL "_Q_mul"
+#define DIVTF3_LIBCALL "_Q_div"
+#define FLOATSITF2_LIBCALL "_Q_itoq"
+#define FIX_TRUNCTFSI2_LIBCALL "_Q_qtoi"
+#define FIXUNS_TRUNCTFSI2_LIBCALL "_Q_qtou"
+#define EXTENDSFTF2_LIBCALL "_Q_stoq"
+#define TRUNCTFSF2_LIBCALL "_Q_qtos"
+#define EXTENDDFTF2_LIBCALL "_Q_dtoq"
+#define TRUNCTFDF2_LIBCALL "_Q_qtod"
+#define EQTF2_LIBCALL "_Q_feq"
+#define NETF2_LIBCALL "_Q_fne"
+#define GTTF2_LIBCALL "_Q_fgt"
+#define GETF2_LIBCALL "_Q_fge"
+#define LTTF2_LIBCALL "_Q_flt"
+#define LETF2_LIBCALL "_Q_fle"
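+
+/* So a `long double' multiply compiles to `call _Q_mul' and a
+   `long double' equality test to `call _Q_feq', using the ABI's
+   software quad-float routines instead of inline FP instructions.  */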
+
+/* We can define the TFmode sqrt optab only if TARGET_FPU. This is because
+ with soft-float, the SFmode and DFmode sqrt instructions will be absent,
+ and the compiler will notice and try to use the TFmode sqrt instruction
+ for calls to the builtin function sqrt, but this fails. */
+#define INIT_TARGET_OPTABS \
+ do { \
+ add_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, ADDTF3_LIBCALL); \
+ sub_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, SUBTF3_LIBCALL); \
+ neg_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, NEGTF2_LIBCALL); \
+ smul_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, MULTF3_LIBCALL); \
+ flodiv_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, DIVTF3_LIBCALL); \
+ eqtf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EQTF2_LIBCALL); \
+ netf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, NETF2_LIBCALL); \
+ gttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GTTF2_LIBCALL); \
+ getf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, GETF2_LIBCALL); \
+ lttf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LTTF2_LIBCALL); \
+ letf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, LETF2_LIBCALL); \
+ trunctfsf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFSF2_LIBCALL); \
+ trunctfdf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, TRUNCTFDF2_LIBCALL); \
+ extendsftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDSFTF2_LIBCALL); \
+ extenddftf2_libfunc = gen_rtx_SYMBOL_REF (Pmode, EXTENDDFTF2_LIBCALL); \
+ floatsitf_libfunc = gen_rtx_SYMBOL_REF (Pmode, FLOATSITF2_LIBCALL); \
+ fixtfsi_libfunc = gen_rtx_SYMBOL_REF (Pmode, FIX_TRUNCTFSI2_LIBCALL); \
+ fixunstfsi_libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, FIXUNS_TRUNCTFSI2_LIBCALL); \
+ if (TARGET_FPU) \
+ sqrt_optab->handlers[(int) TFmode].libfunc \
+ = gen_rtx_SYMBOL_REF (Pmode, "_Q_sqrt"); \
+ INIT_SUBTARGET_OPTABS; \
+ } while (0)
+
+/* This is meant to be redefined in the subtarget config files.  */
+#define INIT_SUBTARGET_OPTABS
+
+/* Compute the cost of computing a constant rtl expression RTX
+ whose rtx-code is CODE. The body of this macro is a portion
+ of a switch statement. If the code is computed here,
+ return it with a return statement. Otherwise, break from the switch. */
+
+#define CONST_COSTS(RTX,CODE,OUTER_CODE) \
+ case CONST_INT: \
+ if (INTVAL (RTX) < 0x1000 && INTVAL (RTX) >= -0x1000) \
+ return 0; \
+ case HIGH: \
+ return 2; \
+ case CONST: \
+ case LABEL_REF: \
+ case SYMBOL_REF: \
+ return 4; \
+ case CONST_DOUBLE: \
+ if (GET_MODE (RTX) == DImode) \
+ if ((XINT (RTX, 3) == 0 \
+ && (unsigned) XINT (RTX, 2) < 0x1000) \
+ || (XINT (RTX, 3) == -1 \
+ && XINT (RTX, 2) < 0 \
+ && XINT (RTX, 2) >= -0x1000)) \
+ return 0; \
+ return 8;
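+
+/* Examples: a 13 bit immediate such as 42 costs 0 (it fits directly in
+   an instruction); a larger CONST_INT falls through to the HIGH case
+   and costs 2; a SYMBOL_REF costs 4, reflecting the two-insn
+   sethi/or sequence needed to build it.  */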
+
+/* Compute the cost of an address. For the sparc, all valid addresses are
+ the same cost. */
+
+#define ADDRESS_COST(RTX) 1
+
+/* Compute extra cost of moving data between one register class
+ and another. */
+#define GENERAL_OR_I64(C) ((C) == GENERAL_REGS || (C) == I64_REGS)
+#define REGISTER_MOVE_COST(CLASS1, CLASS2) \
+ (((FP_REG_CLASS_P (CLASS1) && GENERAL_OR_I64 (CLASS2)) \
+ || (GENERAL_OR_I64 (CLASS1) && FP_REG_CLASS_P (CLASS2)) \
+ || (CLASS1) == FPCC_REGS || (CLASS2) == FPCC_REGS) \
+ ? (sparc_cpu == PROCESSOR_ULTRASPARC ? 12 : 6) : 2)
+
+/* Provide the costs of a rtl expression. This is in the body of a
+ switch on CODE. The purpose for the cost of MULT is to encourage
+ `synth_mult' to find a synthetic multiply when reasonable.
+
+ If we need more than 12 insns to do a multiply, then go out-of-line,
+ since the call overhead will be < 10% of the cost of the multiply. */
+
+#define RTX_COSTS(X,CODE,OUTER_CODE) \
+ case MULT: \
+ return TARGET_HARD_MUL ? COSTS_N_INSNS (5) : COSTS_N_INSNS (25); \
+ case DIV: \
+ case UDIV: \
+ case MOD: \
+ case UMOD: \
+ return COSTS_N_INSNS (25); \
+ /* Make FLOAT and FIX more expensive than CONST_DOUBLE,\
+ so that cse will favor the latter. */ \
+ case FLOAT: \
+ case FIX: \
+ return 19;
+
+#define ISSUE_RATE sparc_issue_rate()
+
+/* Adjust the cost of dependencies. */
+#define ADJUST_COST(INSN,LINK,DEP,COST) \
+ if (sparc_cpu == PROCESSOR_SUPERSPARC) \
+ (COST) = supersparc_adjust_cost (INSN, LINK, DEP, COST); \
+ else if (sparc_cpu == PROCESSOR_ULTRASPARC) \
+ (COST) = ultrasparc_adjust_cost (INSN, LINK, DEP, COST); \
+ else
+
+/* Conditional branches with empty delay slots have a length of two. */
+#define ADJUST_INSN_LENGTH(INSN, LENGTH) \
+ if (GET_CODE (INSN) == CALL_INSN \
+      || (GET_CODE (INSN) == JUMP_INSN && ! simplejump_p (INSN))) \
+ LENGTH += 1; else
+
+/* Control the assembler format that we output. */
+
+/* Output at beginning of assembler file. */
+
+#define ASM_FILE_START(file)
+
+/* A C string constant describing how to begin a comment in the target
+ assembler language. The compiler assumes that the comment will end at
+ the end of the line. */
+
+#define ASM_COMMENT_START "!"
+
+/* Output to assembler file text saying following lines
+ may contain character constants, extra white space, comments, etc. */
+
+#define ASM_APP_ON ""
+
+/* Output to assembler file text saying following lines
+ no longer contain unusual constructs. */
+
+#define ASM_APP_OFF ""
+
+/* ??? Try to make the style consistent here (_OP?). */
+
+#define ASM_LONGLONG ".xword"
+#define ASM_LONG ".word"
+#define ASM_SHORT ".half"
+#define ASM_BYTE_OP ".byte"
+#define ASM_FLOAT ".single"
+#define ASM_DOUBLE ".double"
+#define ASM_LONGDOUBLE ".xxx" /* ??? Not known (or used yet). */
+
+/* Output before read-only data. */
+
+#define TEXT_SECTION_ASM_OP ".text"
+
+/* Output before writable data. */
+
+#define DATA_SECTION_ASM_OP ".data"
+
+/* How to refer to registers in assembler output.
+ This sequence is indexed by compiler's hard-register-number (see above). */
+
+#define REGISTER_NAMES \
+{"%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7", \
+ "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%sp", "%o7", \
+ "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7", \
+ "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%fp", "%i7", \
+ "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7", \
+ "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15", \
+ "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23", \
+ "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31", \
+ "%f32", "%f33", "%f34", "%f35", "%f36", "%f37", "%f38", "%f39", \
+ "%f40", "%f41", "%f42", "%f43", "%f44", "%f45", "%f46", "%f47", \
+ "%f48", "%f49", "%f50", "%f51", "%f52", "%f53", "%f54", "%f55", \
+ "%f56", "%f57", "%f58", "%f59", "%f60", "%f61", "%f62", "%f63", \
+ "%fcc0", "%fcc1", "%fcc2", "%fcc3", "%icc"}
+
+/* Define additional names for use in asm clobbers and asm declarations. */
+
+#define ADDITIONAL_REGISTER_NAMES \
+{{"ccr", SPARC_ICC_REG}, {"cc", SPARC_ICC_REG}}
+
+/* How to renumber registers for dbx and gdb. In the flat model, the frame
+ pointer is really %i7. */
+
+#define DBX_REGISTER_NUMBER(REGNO) \
+  (TARGET_FLAT && (REGNO) == FRAME_POINTER_REGNUM ? 31 : (REGNO))
+
+/* On Sun 4, this limit is 2048. We use 1000 to be safe, since the length
+ can run past this up to a continuation point. Once we used 1500, but
+ a single entry in C++ can run more than 500 bytes, due to the length of
+ mangled symbol names. dbxout.c should really be fixed to do
+ continuations when they are actually needed instead of trying to
+ guess... */
+#define DBX_CONTIN_LENGTH 1000
+
+/* This is how to output a note to DBX telling it the line number
+ to which the following sequence of instructions corresponds.
+
+ This is needed for SunOS 4.0, and should not hurt for 3.2
+ versions either. */
+#define ASM_OUTPUT_SOURCE_LINE(file, line) \
+ { static int sym_lineno = 1; \
+ fprintf (file, ".stabn 68,0,%d,LM%d\nLM%d:\n", \
+ line, sym_lineno, sym_lineno); \
+ sym_lineno += 1; }
+
+/* This is how to output the definition of a user-level label named NAME,
+ such as the label on a static function or variable NAME. */
+
+#define ASM_OUTPUT_LABEL(FILE,NAME) \
+ do { assemble_name (FILE, NAME); fputs (":\n", FILE); } while (0)
+
+/* This is how to output a command to make the user-level label named NAME
+ defined for reference from other files. */
+
+#define ASM_GLOBALIZE_LABEL(FILE,NAME) \
+ do { fputs ("\t.global ", FILE); assemble_name (FILE, NAME); fputs ("\n", FILE);} while (0)
+
+/* The prefix to add to user-visible assembler symbols. */
+
+#define USER_LABEL_PREFIX "_"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, "%s%d:\n", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf ((LABEL), "*%s%ld", (PREFIX), (long)(NUM))
+
+/* This is how to output an assembler line defining a `float' constant.
+   We always have to emit the value as integer words (ASM_LONG) because
+   the native SVR4 ELF assembler is buggy and generates incorrect values
+   when we try to use the .float pseudo-op instead.  */
+
+#define ASM_OUTPUT_FLOAT(FILE,VALUE) \
+ { \
+ long t; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_SINGLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t, \
+ ASM_COMMENT_START, str); \
+  }
+
+/* This is how to output an assembler line defining a `double' constant.
+   We always have to emit the value as integer words (ASM_LONG) because
+   the native SVR4 ELF assembler is buggy and generates incorrect values
+   when we try to use the .double pseudo-op instead.  */
+
+#define ASM_OUTPUT_DOUBLE(FILE,VALUE) \
+ { \
+ long t[2]; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_DOUBLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t[0], \
+ ASM_COMMENT_START, str); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[1]); \
+ }
+
+/* This is how to output an assembler line defining a `long double'
+ constant. */
+
+#define ASM_OUTPUT_LONG_DOUBLE(FILE,VALUE) \
+ { \
+ long t[4]; \
+ char str[30]; \
+ REAL_VALUE_TO_TARGET_LONG_DOUBLE ((VALUE), t); \
+ REAL_VALUE_TO_DECIMAL ((VALUE), "%.20e", str); \
+ fprintf (FILE, "\t%s\t0x%lx %s ~%s\n", ASM_LONG, t[0], \
+ ASM_COMMENT_START, str); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[1]); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[2]); \
+ fprintf (FILE, "\t%s\t0x%lx\n", ASM_LONG, t[3]); \
+ }
+
+/* This is how to output an assembler line defining an `int' constant. */
+
+#define ASM_OUTPUT_INT(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_LONG), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line defining a DImode constant. */
+#define ASM_OUTPUT_DOUBLE_INT(FILE,VALUE) \
+ output_double_int (FILE, VALUE)
+
+/* Likewise for `char' and `short' constants. */
+
+#define ASM_OUTPUT_SHORT(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_SHORT), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+#define ASM_OUTPUT_CHAR(FILE,VALUE) \
+( fprintf (FILE, "\t%s\t", ASM_BYTE_OP), \
+ output_addr_const (FILE, (VALUE)), \
+ fprintf (FILE, "\n"))
+
+/* This is how to output an assembler line for a numeric constant byte. */
+
+#define ASM_OUTPUT_BYTE(FILE,VALUE) \
+ fprintf (FILE, "\t%s\t0x%x\n", ASM_BYTE_OP, (VALUE))
+
+/* This is how to output an element of a case-vector that is absolute. */
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ if (Pmode == SImode) \
+ fprintf (FILE, "\t.word\t"); \
+ else \
+ fprintf (FILE, "\t.xword\t"); \
+ assemble_name (FILE, label); \
+ fputc ('\n', FILE); \
+} while (0)
+
+/* This is how to output an element of a case-vector that is relative.
+ (SPARC uses such vectors only when generating PIC.) */
+
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+do { \
+ char label[30]; \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", VALUE); \
+ if (Pmode == SImode) \
+ fprintf (FILE, "\t.word\t"); \
+ else \
+ fprintf (FILE, "\t.xword\t"); \
+ assemble_name (FILE, label); \
+ ASM_GENERATE_INTERNAL_LABEL (label, "L", (REL)); \
+ fputc ('-', FILE); \
+ assemble_name (FILE, label); \
+ fputc ('\n', FILE); \
+} while (0)
+
+/* This is how to output an assembler line
+ that says to advance the location counter
+ to a multiple of 2**LOG bytes. */
+
+#define ASM_OUTPUT_ALIGN(FILE,LOG) \
+ if ((LOG) != 0) \
+ fprintf (FILE, "\t.align %d\n", (1<<(LOG)))
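+
+/* E.g. ASM_OUTPUT_ALIGN (file, 3) emits `.align 8' (an 8 byte, i.e.
+   2**3, boundary); LOG == 0 emits nothing.  */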
+
+#define LABEL_ALIGN_AFTER_BARRIER(LABEL) (sparc_align_jumps)
+
+#define LOOP_ALIGN(LABEL) (sparc_align_loops)
+
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+/* This says how to output an assembler line
+ to define a global common symbol. */
+
+#define ASM_OUTPUT_COMMON(FILE, NAME, SIZE, ROUNDED) \
+( fputs ("\t.common ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u,\"bss\"\n", (SIZE)))
+
+/* This says how to output an assembler line to define a local common
+ symbol. */
+
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGNED) \
+( fputs ("\t.reserve ", (FILE)), \
+ assemble_name ((FILE), (NAME)), \
+ fprintf ((FILE), ",%u,\"bss\",%u\n", \
+ (SIZE), ((ALIGNED) / BITS_PER_UNIT)))
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ do { \
+ fputs (".globl ", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ fputs ("\n", (FILE)); \
+ ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \
+ } while (0)
+
+/* Store in OUTPUT a string (made with alloca) containing
+ an assembler-name for a local static variable named NAME.
+ LABELNO is an integer which is different for each call. */
+
+#define ASM_FORMAT_PRIVATE_NAME(OUTPUT, NAME, LABELNO) \
+( (OUTPUT) = (char *) alloca (strlen ((NAME)) + 10), \
+ sprintf ((OUTPUT), "%s.%d", (NAME), (LABELNO)))
+
+#define IDENT_ASM_OP ".ident"
+
+/* Output #ident as a .ident. */
+
+#define ASM_OUTPUT_IDENT(FILE, NAME) \
+ fprintf (FILE, "\t%s\t\"%s\"\n", IDENT_ASM_OP, NAME);
+
+/* Output code to add DELTA to the first argument, and then jump to FUNCTION.
+ Used for C++ multiple inheritance. */
+#define ASM_OUTPUT_MI_THUNK(FILE, THUNK_FNDECL, DELTA, FUNCTION) \
+do { \
+ int big_delta = (DELTA) >= 4096 || (DELTA) < -4096; \
+ if (big_delta) \
+ fprintf (FILE, "\tset %d,%%g1\n\tadd %%o0,%%g1,%%o0\n", (DELTA)); \
+ /* Don't use the jmp solution unless we know the target is local to \
+ the application or shared object. \
+ XXX: Wimp out and don't actually check anything except if this is \
+ an embedded target where we assume there are no shared libs. */ \
+ if (!TARGET_CM_EMBMEDANY || flag_pic) \
+ { \
+ if (! big_delta) \
+ fprintf (FILE, "\tadd %%o0,%d,%%o0\n", DELTA); \
+ fprintf (FILE, "\tmov %%o7,%%g1\n"); \
+ fprintf (FILE, "\tcall "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ",0\n"); \
+ } \
+ else if (TARGET_CM_EMBMEDANY) \
+ { \
+ fprintf (FILE, "\tsetx "); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ",%%g5,%%g1\n\tjmp %%g1\n"); \
+ } \
+ else \
+ { \
+ fprintf (FILE, "\tsethi %%hi("); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, "),%%g1\n\tjmp %%g1+%%lo("); \
+ assemble_name (FILE, XSTR (XEXP (DECL_RTL (FUNCTION), 0), 0)); \
+ fprintf (FILE, ")\n"); \
+ } \
+ if (!TARGET_CM_EMBMEDANY || flag_pic) \
+ fprintf (FILE, "\tmov %%g1,%%o7\n"); \
+ else if (big_delta) \
+ fprintf (FILE, "\tnop\n"); \
+ else \
+ fprintf (FILE, "\tadd %%o0,%d,%%o0\n", DELTA); \
+} while (0)
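+
+/* For reference, the common case (PIC, DELTA within the signed 13-bit
+   immediate range) produces a sequence of the shape
+	add %o0,DELTA,%o0
+	mov %o7,%g1
+	call function,0
+	 mov %g1,%o7
+   The final mov sits in the call's delay slot and restores %o7, so that
+   FUNCTION returns directly to the thunk's caller.  */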
+
+/* Define the parentheses used to group arithmetic operations
+ in assembler code. */
+
+#define ASM_OPEN_PAREN "("
+#define ASM_CLOSE_PAREN ")"
+
+/* Define results of standard character escape sequences. */
+#define TARGET_BELL 007
+#define TARGET_BS 010
+#define TARGET_TAB 011
+#define TARGET_NEWLINE 012
+#define TARGET_VT 013
+#define TARGET_FF 014
+#define TARGET_CR 015
+
+#define PRINT_OPERAND_PUNCT_VALID_P(CHAR) \
+ ((CHAR) == '#' || (CHAR) == '*' || (CHAR) == '^' || (CHAR) == '(' || (CHAR) == '_')
+
+/* Print operand X (an rtx) in assembler syntax to file FILE.
+ CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
+ For `%' followed by punctuation, CODE is the punctuation and X is null. */
+
+#define PRINT_OPERAND(FILE, X, CODE) print_operand (FILE, X, CODE)
+
+/* Print a memory address as an operand to reference that memory location. */
+
+#define PRINT_OPERAND_ADDRESS(FILE, ADDR) \
+{ register rtx base, index = 0; \
+ int offset = 0; \
+ register rtx addr = ADDR; \
+ if (GET_CODE (addr) == REG) \
+ fputs (reg_names[REGNO (addr)], FILE); \
+ else if (GET_CODE (addr) == PLUS) \
+ { \
+ if (GET_CODE (XEXP (addr, 0)) == CONST_INT) \
+ offset = INTVAL (XEXP (addr, 0)), base = XEXP (addr, 1);\
+ else if (GET_CODE (XEXP (addr, 1)) == CONST_INT) \
+ offset = INTVAL (XEXP (addr, 1)), base = XEXP (addr, 0);\
+ else \
+ base = XEXP (addr, 0), index = XEXP (addr, 1); \
+ fputs (reg_names[REGNO (base)], FILE); \
+ if (index == 0) \
+ fprintf (FILE, "%+d", offset); \
+ else if (GET_CODE (index) == REG) \
+ fprintf (FILE, "+%s", reg_names[REGNO (index)]); \
+ else if (GET_CODE (index) == SYMBOL_REF \
+ || GET_CODE (index) == LABEL_REF \
+ || GET_CODE (index) == CONST) \
+ fputc ('+', FILE), output_addr_const (FILE, index); \
+ else abort (); \
+ } \
+ else if (GET_CODE (addr) == MINUS \
+ && GET_CODE (XEXP (addr, 1)) == LABEL_REF) \
+ { \
+ output_addr_const (FILE, XEXP (addr, 0)); \
+ fputs ("-(", FILE); \
+ output_addr_const (FILE, XEXP (addr, 1)); \
+ fputs ("-.)", FILE); \
+ } \
+ else if (GET_CODE (addr) == LO_SUM) \
+ { \
+ output_operand (XEXP (addr, 0), 0); \
+ fputs ("+%lo(", FILE); \
+ output_address (XEXP (addr, 1)); \
+ fputc (')', FILE); \
+ } \
+ else if (flag_pic && GET_CODE (addr) == CONST \
+ && GET_CODE (XEXP (addr, 0)) == MINUS \
+ && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST \
+ && GET_CODE (XEXP (XEXP (XEXP (addr, 0), 1), 0)) == MINUS \
+ && XEXP (XEXP (XEXP (XEXP (addr, 0), 1), 0), 1) == pc_rtx) \
+ { \
+ addr = XEXP (addr, 0); \
+ output_addr_const (FILE, XEXP (addr, 0)); \
+	  /* Group the args of the second CONST in parentheses.  */	\
+ fputs ("-(", FILE); \
+ /* Skip past the second CONST--it does nothing for us. */\
+ output_addr_const (FILE, XEXP (XEXP (addr, 1), 0)); \
+ /* Close the parenthesis. */ \
+ fputc (')', FILE); \
+ } \
+ else \
+ { \
+ output_addr_const (FILE, addr); \
+ } \
+}
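+
+/* Some illustrative renderings (the register names are examples): a bare
+   REG prints as "%fp"; REG plus CONST_INT as "%fp-16"; a base/index pair
+   as "%g1+%o2"; and a LO_SUM as "%g1+%lo(sym)".  */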
+
+/* Define the codes that are matched by predicates in sparc.c. */
+
+#define PREDICATE_CODES \
+{"reg_or_0_operand", {SUBREG, REG, CONST_INT, CONST_DOUBLE}}, \
+{"fp_zero_operand", {CONST_DOUBLE}}, \
+{"intreg_operand", {SUBREG, REG}}, \
+{"fcc_reg_operand", {REG}}, \
+{"icc_or_fcc_reg_operand", {REG}}, \
+{"restore_operand", {REG}}, \
+{"call_operand", {MEM}}, \
+{"call_operand_address", {SYMBOL_REF, LABEL_REF, CONST, CONST_DOUBLE, ADDRESSOF, \
+ SUBREG, REG, PLUS, LO_SUM, CONST_INT}}, \
+{"symbolic_operand", {SYMBOL_REF, LABEL_REF, CONST, CONST_DOUBLE}}, \
+{"symbolic_memory_operand", {SUBREG, MEM}}, \
+{"label_ref_operand", {LABEL_REF}}, \
+{"sp64_medium_pic_operand", {CONST}}, \
+{"data_segment_operand", {SYMBOL_REF, PLUS, CONST}}, \
+{"text_segment_operand", {LABEL_REF, SYMBOL_REF, PLUS, CONST}}, \
+{"reg_or_nonsymb_mem_operand", {SUBREG, REG, MEM}}, \
+{"sparc_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, MEM}}, \
+{"move_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE, MEM}}, \
+{"splittable_symbolic_memory_operand", {MEM}}, \
+{"splittable_immediate_memory_operand", {MEM}}, \
+{"eq_or_neq", {EQ, NE}}, \
+{"normal_comp_operator", {GE, GT, LE, LT, GTU, LEU}}, \
+{"noov_compare_op", {NE, EQ, GE, GT, LE, LT, GEU, GTU, LEU, LTU}}, \
+{"v9_regcmp_op", {EQ, NE, GE, LT, LE, GT}}, \
+{"v8plus_regcmp_op", {EQ, NE}}, \
+{"extend_op", {SIGN_EXTEND, ZERO_EXTEND}}, \
+{"cc_arithop", {AND, IOR, XOR}}, \
+{"cc_arithopn", {AND, IOR}}, \
+{"arith_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith11_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith10_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT}}, \
+{"arith_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"arith11_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"arith10_double_operand", {SUBREG, REG, CONSTANT_P_RTX, CONST_INT, CONST_DOUBLE}}, \
+{"small_int", {CONST_INT, CONSTANT_P_RTX}}, \
+{"uns_small_int", {CONST_INT, CONSTANT_P_RTX}}, \
+{"uns_arith_operand", {SUBREG, REG, CONST_INT, CONSTANT_P_RTX}}, \
+{"clobbered_register", {REG}},
+
+
+/* The number of Pmode words for the setjmp buffer. */
+#define JMP_BUF_SIZE 12
+
+#define DONT_ACCESS_GBLS_AFTER_EPILOGUE (flag_pic)
+
+/* Declare functions defined in sparc.c and used in templates. */
+
+extern char *doublemove_string ();
+extern char *output_block_move ();
+extern char *output_cbranch ();
+extern char *output_fp_move_double ();
+extern char *output_fp_move_quad ();
+extern char *output_move_double ();
+extern char *output_move_quad ();
+extern char *output_return ();
+extern char *output_scc_insn ();
+extern char *output_v9branch ();
+extern char *singlemove_string ();
+
+extern void emit_v9_brxx_insn ();
+extern void finalize_pic ();
+extern void order_regs_for_local_alloc ();
+extern void output_double_int ();
+extern void output_function_epilogue ();
+extern void output_function_prologue ();
+extern void print_operand ();
+extern void sparc_flat_output_function_epilogue ();
+extern void sparc_flat_output_function_prologue ();
+
+extern int addrs_ok_for_ldd_peep ();
+extern int arith10_double_operand ();
+extern int arith10_operand ();
+extern int arith11_double_operand ();
+extern int arith11_operand ();
+extern int arith_double_operand ();
+extern int arith_operand ();
+extern int call_operand_address ();
+extern int cc_arithop ();
+extern int cc_arithopn ();
+extern int check_pic ();
+extern int compute_frame_size ();
+extern int data_segment_operand ();
+extern int eligible_for_epilogue_delay ();
+extern int eligible_for_return_delay ();
+extern int emit_move_sequence ();
+extern int extend_op ();
+extern int fcc_reg_operand ();
+extern int fp_zero_operand ();
+extern int icc_or_fcc_reg_operand ();
+extern int label_ref_operand ();
+extern int mem_aligned_8 ();
+extern int move_operand ();
+extern int noov_compare_op ();
+extern int pic_address_needs_scratch ();
+extern int reg_or_0_operand ();
+extern int reg_or_nonsymb_mem_operand ();
+extern int reg_unused_after ();
+extern int register_ok_for_ldd ();
+extern int registers_ok_for_ldd_peep ();
+extern int restore_operand ();
+extern int short_branch ();
+extern int small_int ();
+extern int sp64_medium_pic_operand ();
+extern int sparc_flat_eligible_for_epilogue_delay ();
+extern int sparc_flat_epilogue_delay_slots ();
+extern int sparc_issue_rate ();
+extern int sparc_operand ();
+extern int splittable_immediate_memory_operand ();
+extern int splittable_symbolic_memory_operand ();
+extern int supersparc_adjust_cost ();
+extern int symbolic_memory_operand ();
+extern int symbolic_operand ();
+extern int text_segment_operand ();
+extern int ultrasparc_adjust_cost ();
+extern int uns_small_int ();
+extern int v8plus_regcmp_op ();
+extern int v8plus_regcmp_p ();
+extern int v9_regcmp_op ();
+extern int v9_regcmp_p ();
+
+extern unsigned long sparc_flat_compute_frame_size ();
+extern unsigned long sparc_type_code ();
+
+extern char *sparc_v8plus_shift ();
+
+#ifdef __STDC__
+/* Function used for V8+ code generation. Returns 1 if the high
+ 32 bits of REG are 0 before INSN. */
+extern int sparc_check_64 (struct rtx_def *, struct rtx_def *);
+extern int sparc_return_peephole_ok (struct rtx_def *, struct rtx_def *);
+extern int compute_frame_size (int, int);
+#endif
+
+/* Defined in flags.h, but insn-emit.c does not include flags.h. */
+
+extern int flag_pic;
diff --git a/contrib/gcc/config/sparc/sparc.md b/contrib/gcc/config/sparc/sparc.md
new file mode 100644
index 0000000..ccfde05
--- /dev/null
+++ b/contrib/gcc/config/sparc/sparc.md
@@ -0,0 +1,6684 @@
+;;- Machine description for SPARC chip for GNU C compiler
+;; Copyright (C) 1987, 88, 89, 92-96, 1997 Free Software Foundation, Inc.
+;; Contributed by Michael Tiemann (tiemann@cygnus.com)
+;; 64 bit SPARC V9 support by Michael Tiemann, Jim Wilson, and Doug Evans,
+;; at Cygnus Support.
+
+;; This file is part of GNU CC.
+
+;; GNU CC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 2, or (at your option)
+;; any later version.
+
+;; GNU CC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GNU CC; see the file COPYING. If not, write to
+;; the Free Software Foundation, 59 Temple Place - Suite 330,
+;; Boston, MA 02111-1307, USA.
+
+;;- See file "rtl.def" for documentation on define_insn, match_*, et al.
+
+;; The upper 32 fp regs on the v9 can't hold SFmode values. To deal with this
+;; a second register class, EXTRA_FP_REGS, exists for the v9 chip. The name
+;; is a bit of a misnomer as it covers all 64 fp regs. The corresponding
+;; constraint letter is 'e'. To avoid any confusion, 'e' is used instead of
+;; 'f' for all DF/TFmode values, including those that are specific to the v8.
+;;
+;; -mlive-g0 is *not* supported for TARGET_ARCH64, so we don't bother to
+;; test TARGET_LIVE_G0 if we have TARGET_ARCH64.
+
+;; Attribute for cpu type.
+;; These must match the values for enum processor_type in sparc.h.
+(define_attr "cpu" "v7,cypress,v8,supersparc,sparclite,f930,f934,sparclet,tsc701,v9,ultrasparc"
+ (const (symbol_ref "sparc_cpu_attr")))
+
+;; Attribute for the instruction set.
+;; At present we only need to distinguish v9/!v9, but for clarity we
+;; test TARGET_V8 too.
+(define_attr "isa" "v6,v8,v9,sparclet"
+ (const
+ (cond [(symbol_ref "TARGET_V9") (const_string "v9")
+ (symbol_ref "TARGET_V8") (const_string "v8")
+ (symbol_ref "TARGET_SPARCLET") (const_string "sparclet")]
+ (const_string "v6"))))
+
+;; Architecture size.
+(define_attr "arch" "arch32bit,arch64bit"
+ (const
+ (cond [(symbol_ref "TARGET_ARCH64") (const_string "arch64bit")]
+ (const_string "arch32bit"))))
+
+;; Whether -mlive-g0 is in effect.
+(define_attr "live_g0" "no,yes"
+ (const
+ (cond [(symbol_ref "TARGET_LIVE_G0") (const_string "yes")]
+ (const_string "no"))))
+
+;; Insn type. Used to default other attribute values.
+
+;; type "unary" insns have one input operand (1) and one output operand (0)
+;; type "binary" insns have two input operands (1,2) and one output (0)
+;; type "compare" insns have one or two input operands (0,1) and no output
+;; type "call_no_delay_slot" is a call followed by an unimp instruction.
+
+(define_attr "type"
+ "move,unary,binary,compare,load,sload,store,ialu,shift,uncond_branch,branch,call,call_no_delay_slot,return,address,imul,fpload,fpstore,fp,fpmove,fpcmove,fpcmp,fpmul,fpdivs,fpdivd,fpsqrt,cmove,multi,misc"
+ (const_string "binary"))
+
+;; Set to true if the insn uses a call-clobbered intermediate register.
+(define_attr "use_clobbered" "false,true"
+ (if_then_else (and (eq_attr "type" "address")
+ (match_operand 0 "clobbered_register" ""))
+ (const_string "true")
+ (const_string "false")))
+
+;; Length (in # of insns).
+(define_attr "length" ""
+ (cond [(eq_attr "type" "load,sload,fpload")
+ (if_then_else (match_operand 1 "symbolic_memory_operand" "")
+ (const_int 2) (const_int 1))
+
+ (eq_attr "type" "store,fpstore")
+ (if_then_else (match_operand 0 "symbolic_memory_operand" "")
+ (const_int 2) (const_int 1))
+
+ (eq_attr "type" "address") (const_int 2)
+
+ (eq_attr "type" "binary")
+ (if_then_else (ior (match_operand 2 "arith_operand" "")
+ (match_operand 2 "arith_double_operand" ""))
+ (const_int 1) (const_int 3))
+
+ (eq_attr "type" "multi") (const_int 2)
+
+ (eq_attr "type" "move,unary")
+ (if_then_else (ior (match_operand 1 "arith_operand" "")
+ (match_operand 1 "arith_double_operand" ""))
+ (const_int 1) (const_int 2))]
+
+ (const_int 1)))
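+
+;; (For example, a load whose address matches symbolic_memory_operand needs
+;; a sethi/ld pair and so has length 2, while a register-addressed load is
+;; a single insn.)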
+
+(define_asm_attributes
+ [(set_attr "length" "1")
+ (set_attr "type" "multi")])
+
+;; Attributes for instruction and branch scheduling
+
+(define_attr "in_call_delay" "false,true"
+ (cond [(eq_attr "type" "uncond_branch,branch,call,call_no_delay_slot,return,multi")
+ (const_string "false")
+ (eq_attr "type" "load,fpload,store,fpstore")
+ (if_then_else (eq_attr "length" "1")
+ (const_string "true")
+ (const_string "false"))
+ (eq_attr "type" "address")
+ (if_then_else (eq_attr "use_clobbered" "false")
+ (const_string "true")
+ (const_string "false"))]
+ (if_then_else (eq_attr "length" "1")
+ (const_string "true")
+ (const_string "false"))))
+
+(define_delay (eq_attr "type" "call")
+ [(eq_attr "in_call_delay" "true") (nil) (nil)])
+
+(define_attr "leaf_function" "false,true"
+ (const (symbol_ref "leaf_function")))
+
+
+(define_attr "in_return_delay" "false,true"
+ (if_then_else (and (and (and (eq_attr "type" "move,load,sload,store,binary,ialu")
+ (eq_attr "length" "1"))
+ (eq_attr "leaf_function" "false"))
+ (match_insn "eligible_for_return_delay"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_delay (and (eq_attr "type" "return")
+ (eq_attr "isa" "v9"))
+ [(eq_attr "in_return_delay" "true") (nil) (nil)])
+
+;; ??? Should implement the notion of predelay slots for floating point
+;; branches. This would allow us to remove the nop always inserted before
+;; a floating point branch.
+
+;; ??? It is OK for fill_simple_delay_slots to put load/store instructions
+;; in a delay slot, but it is not OK for fill_eager_delay_slots to do so.
+;; This is because doing so will add several pipeline stalls to the path
+;; that the load/store did not come from. Unfortunately, there is no way
+;; to prevent fill_eager_delay_slots from using load/store without completely
+;; disabling them.  For the SPEC benchmark set, this is a serious loss,
+;; because it prevents us from moving back the final store of inner loops.
+
+(define_attr "in_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_attr "in_uncond_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_attr "in_annul_branch_delay" "false,true"
+ (if_then_else (and (eq_attr "type" "!uncond_branch,branch,call,call_no_delay_slot,multi")
+ (eq_attr "length" "1"))
+ (const_string "true")
+ (const_string "false")))
+
+(define_delay (eq_attr "type" "branch")
+ [(eq_attr "in_branch_delay" "true")
+ (nil) (eq_attr "in_annul_branch_delay" "true")])
+
+(define_delay (eq_attr "type" "uncond_branch")
+ [(eq_attr "in_uncond_branch_delay" "true")
+ (nil) (nil)])
+
+;; Function units of the SPARC
+
+;; (define_function_unit {name} {num-units} {n-users} {test}
+;; {ready-delay} {issue-delay} [{conflict-list}])
+
+;; The integer ALU.
+;; (Noted only for documentation; units that take one cycle do not need to
+;; be specified.)
+
+;; On the sparclite, integer multiply takes 1, 3, or 5 cycles depending on
+;; the inputs.
+
+;; (define_function_unit "alu" 1 0
+;; (eq_attr "type" "unary,binary,move,address") 1 0)
+
+;; ---- cypress CY7C602 scheduling:
+;; Memory with load-delay of 1 (i.e., 2 cycle load).
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "load,sload,fpload"))
+ 2 2)
+
+;; SPARC has two floating-point units: the FP ALU,
+;; and the FP MUL/DIV/SQRT unit.
+;; Instruction timings on the CY7C602 are as follows
+;; FABSs 4
+;; FADDs/d 5/5
+;; FCMPs/d 4/4
+;; FDIVs/d 23/37
+;; FMOVs 4
+;; FMULs/d 5/7
+;; FNEGs 4
+;; FSQRTs/d 34/63
+;; FSUBs/d 5/5
+;; FdTOi/s 5/5
+;; FsTOi/d 5/5
+;; FiTOs/d 9/5
+
+;; The CY7C602 can only support 2 fp insns simultaneously.
+;; More insns cause the chip to stall.
+
+(define_function_unit "fp_alu" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fp,fpmove"))
+ 5 5)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpmul"))
+ 7 7)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpdivs,fpdivd"))
+ 37 37)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "cypress")
+ (eq_attr "type" "fpsqrt"))
+ 63 63)
+
+;; ----- The TMS390Z55 scheduling
+;; The Supersparc can issue 1 - 3 insns per cycle: up to two integer,
+;; one ld/st, one fp.
+;; Memory delivers its result in one cycle to IU, zero cycles to FP
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload"))
+ 1 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpload"))
+ 0 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
+(define_function_unit "shift" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+;; There are only two write ports to the integer register file
+;; A store also uses a write port
+
+(define_function_unit "iwport" 2 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "load,sload,store,shift,ialu"))
+ 1 1)
+
+;; Timings; throughput/latency
+;; FADD     1/3    add/sub, format conv, compare, abs, neg
+;; FMUL 1/3
+;; FDIVs 4/6
+;; FDIVd 7/9
+;; FSQRTs 6/8
+;; FSQRTd 10/12
+;; IMUL 4/4
+
+(define_function_unit "fp_alu" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fp,fpmove,fpcmp"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpmul"))
+ 3 1)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivs"))
+ 6 4)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpdivd"))
+ 9 7)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "fpsqrt"))
+ 12 10)
+
+(define_function_unit "fp_mds" 1 0
+ (and (eq_attr "cpu" "supersparc")
+ (eq_attr "type" "imul"))
+ 4 4)
+
+;; ----- sparclet tsc701 scheduling
+;; The tsc701 issues 1 insn per cycle.
+;; Results may be written back out of order.
+
+;; Loads take 2 extra cycles to complete and 4 can be buffered at a time.
+
+(define_function_unit "tsc701_load" 4 1
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "load,sload"))
+ 3 1)
+
+;; Stores take 2(?) extra cycles to complete.
+;; It is desirable to not have any memory operation in the following 2 cycles.
+;; (??? or 2 memory ops in the case of std).
+
+(define_function_unit "tsc701_store" 1 0
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "store"))
+ 3 3
+ [(eq_attr "type" "load,sload,store")])
+
+;; The multiply unit has a latency of 5.
+(define_function_unit "tsc701_mul" 1 0
+ (and (eq_attr "cpu" "tsc701")
+ (eq_attr "type" "imul"))
+ 5 5)
+
+;; ----- The UltraSPARC-1 scheduling
+;; UltraSPARC has two integer units. Shift instructions can only execute
+;; on IE0. Condition code setting instructions, call, and jmpl (including
+;; the ret and retl pseudo-instructions) can only execute on IE1.
+;; Branch on register uses IE1, but branch on condition code does not.
+;; Conditional moves take 2 cycles. No other instruction can issue in the
+;; same cycle as a conditional move.
+;; Multiply and divide take many cycles during which no other instructions
+;; can issue.
+;; Memory delivers its result in two cycles (except for signed loads,
+;; which take one cycle more). One memory instruction can be issued per
+;; cycle.
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "load,fpload"))
+ 2 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "sload"))
+ 3 1)
+
+(define_function_unit "memory" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "store,fpstore"))
+ 1 1)
+
+(define_function_unit "ieu" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "ialu,binary,shift,compare,cmove,call"))
+ 1 1)
+
+(define_function_unit "ieu_shift" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "shift"))
+ 1 1)
+
+(define_function_unit "ieu_shift" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "cmove"))
+ 2 1)
+
+;; Timings; throughput/latency
+;; FMOV 1/1 fmov, fabs, fneg
+;; FMOVcc 1/2
+;; FADD     1/4    add/sub, format conv, compare
+;; FMUL 1/4
+;; FDIVs 12/12
+;; FDIVd 22/22
+;; FSQRTs 12/12
+;; FSQRTd 22/22
+;; FCMP takes 1 cycle to branch, 2 cycles to conditional move.
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmove"))
+ 1 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmove"))
+ 2 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fp"))
+ 4 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmp"))
+ 2 1)
+
+(define_function_unit "fmul" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpmul"))
+ 4 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpcmove"))
+ 2 1)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivs"))
+ 12 12)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpdivd"))
+ 22 22)
+
+(define_function_unit "fadd" 1 0
+ (and (eq_attr "cpu" "ultrasparc")
+ (eq_attr "type" "fpsqrt"))
+ 12 12)
+
+;; Compare instructions.
+;; This controls RTL generation and register allocation.
+
+;; We generate RTL for comparisons and branches by having the cmpxx
+;; patterns store away the operands. Then, the scc and bcc patterns
+;; emit RTL for both the compare and the branch.
+;;
+;; We do this because we want to generate different code for an sne and
+;; seq insn. In those cases, if the second operand of the compare is not
+;; const0_rtx, we want to compute the xor of the two operands and test
+;; it against zero.
+;;
+;; We start with the DEFINE_EXPANDs, then the DEFINE_INSNs to match
+;; the patterns. Finally, we have the DEFINE_SPLITs for some of the scc
+;; insns that actually require more than one machine instruction.
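+;;
+;; As a hypothetical walk-through: for "a == b", the cmpsi expander below
+;; merely records its operands in sparc_compare_op0/op1 and emits nothing;
+;; the later "seq" expander then picks the cheapest form: an xor plus an
+;; addx/subx sequence, a v9 conditional move, or a real compare and branch.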
+
+;; Put cmpsi first among compare insns so it matches two CONST_INT operands.
+
+(define_expand "cmpsi"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "arith_operand" "")))]
+ ""
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdi"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "")
+ (match_operand:DI 1 "arith_double_operand" "")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpsf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmpdf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+(define_expand "cmptf"
+ ;; The 96 here isn't ever used by anyone.
+ [(set (reg:CCFP 96)
+ (compare:CCFP (match_operand:TF 0 "register_operand" "")
+ (match_operand:TF 1 "register_operand" "")))]
+ "TARGET_FPU"
+ "
+{
+ sparc_compare_op0 = operands[0];
+ sparc_compare_op1 = operands[1];
+ DONE;
+}")
+
+;; Now the compare DEFINE_INSNs.
+
+(define_insn "*cmpsi_insn"
+ [(set (reg:CC 100)
+ (compare:CC (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "cmp %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmpdi_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "cmp %0,%1"
+ [(set_attr "type" "compare")])
+
+(define_insn "cmpdi_v8plus"
+ [(set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 0 "register_operand" "r,r,r")
+ (match_operand:DI 1 "arith_double_operand" "J,I,r")))
+ (clobber (match_scratch:SI 2 "=&h,&h,&h"))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*
+{
+  /* The srl can be omitted if the value in %L0 or %L1 is already
+     zero-extended.  */
+
+ output_asm_insn (\"sllx %H0,32,%2\", operands);
+
+ if (sparc_check_64 (operands[0], insn) <= 0)
+ output_asm_insn (\"srl %L0,0,%L0\", operands);
+
+ switch (which_alternative)
+ {
+ case 0:
+ return \"orcc %L0,%2,%%g0\";
+ case 1:
+ return \"or %L0,%2,%2\;cmp %2,%1\";
+ case 2:
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn (\"srl %L1,0,%L1\", operands);
+ return \"sllx %H1,32,%3\;or %L0,%2,%2\;or %L1,%3,%3\;cmp %2,%3\";
+ default:
+ abort();
+ }
+}"
+ [(set_attr "length" "3,4,7")])
+
+(define_insn "*cmpsf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpes %0,%1,%2\";
+ return \"fcmpes %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmped %0,%1,%2\";
+ return \"fcmped %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmptf_fpe"
+ [(set (match_operand:CCFPE 0 "fcc_reg_operand" "=c")
+ (compare:CCFPE (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpeq %0,%1,%2\";
+ return \"fcmpeq %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpsf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmps %0,%1,%2\";
+ return \"fcmps %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmpdf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpd %0,%1,%2\";
+ return \"fcmpd %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+(define_insn "*cmptf_fp"
+ [(set (match_operand:CCFP 0 "fcc_reg_operand" "=c")
+ (compare:CCFP (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "*
+{
+ if (TARGET_V9)
+ return \"fcmpq %0,%1,%2\";
+ return \"fcmpq %1,%2\";
+}"
+ [(set_attr "type" "fpcmp")])
+
+;; Next come the scc insns. For seq, sne, sgeu, and sltu, we can do this
+;; without jumps using the addx/subx instructions. For seq/sne on v9 we use
+;; the same code as v8 (the addx/subx method has more applications). The
+;; exception to this is "reg != 0" which can be done in one instruction on v9
+;; (so we do it). For the rest, on v9 we use conditional moves; on v8, we do
+;; branches.
+
+;; Seq_special[_xxx] and sne_special[_xxx] clobber the CC reg, because they
+;; generate addcc/subcc instructions.
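+;;
+;; (Concretely, the *snesi_zero pattern further down computes
+;; "%0 = (%1 != 0)" as
+;;	subcc %g0,%1,%g0
+;;	addx %g0,0,%0
+;; so the carry out of the subtract becomes the 0/1 result.)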
+
+(define_expand "seqsi_special"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "! TARGET_LIVE_G0"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "seqdi_special"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (eq:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "snesi_special"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "! TARGET_LIVE_G0"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "snedi_special"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:DI 0 "register_operand" "")
+ (ne:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "seqdi_special_trunc"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (eq:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "snedi_special_trunc"
+ [(set (match_dup 3)
+ (xor:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))
+ (set (match_operand:SI 0 "register_operand" "")
+ (ne:DI (match_dup 3) (const_int 0)))]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (DImode); }")
+
+(define_expand "seqsi_special_extend"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (eq:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+(define_expand "snesi_special_extend"
+ [(set (match_dup 3)
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (parallel [(set (match_operand:DI 0 "register_operand" "")
+ (ne:SI (match_dup 3) (const_int 0)))
+ (clobber (reg:CC 100))])]
+ "TARGET_ARCH64"
+ "{ operands[3] = gen_reg_rtx (SImode); }")
+
+;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
+;; However, the code handles both SImode and DImode.
+(define_expand "seq"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (eq:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == SImode)
+ {
+ rtx pat;
+
+ if (GET_MODE (operands[0]) == SImode)
+ pat = gen_seqsi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else if (! TARGET_ARCH64)
+ FAIL;
+ else
+ pat = gen_seqsi_special_extend (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == DImode)
+ {
+ rtx pat;
+
+ if (! TARGET_ARCH64)
+ FAIL;
+ else if (GET_MODE (operands[0]) == SImode)
+ pat = gen_seqdi_special_trunc (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else
+ pat = gen_seqdi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (EQ, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; ??? v9: Operand 0 needs a mode, so SImode was chosen.
+;; However, the code handles both SImode and DImode.
+(define_expand "sne"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ne:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == SImode)
+ {
+ rtx pat;
+
+ if (GET_MODE (operands[0]) == SImode)
+ pat = gen_snesi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else if (! TARGET_ARCH64)
+ FAIL;
+ else
+ pat = gen_snesi_special_extend (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == DImode)
+ {
+ rtx pat;
+
+ if (! TARGET_ARCH64)
+ FAIL;
+ else if (GET_MODE (operands[0]) == SImode)
+ pat = gen_snedi_special_trunc (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ else
+ pat = gen_snedi_special (operands[0], sparc_compare_op0,
+ sparc_compare_op1);
+ emit_insn (pat);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (NE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgt"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (gt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (GT, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "slt"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (lt:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (LT, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sge"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ge:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (GE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sle"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (le:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_insn (gen_sne (operands[0]));
+ DONE;
+ }
+ else if (TARGET_V9)
+ {
+ if (gen_v9_scc (LE, operands))
+ DONE;
+ /* fall through */
+ }
+ operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgtu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (gtu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (! TARGET_V9)
+ {
+ rtx tem;
+
+ /* We can do ltu easily, so if both operands are registers, swap them and
+ do a LTU. */
+ if ((GET_CODE (sparc_compare_op0) == REG
+ || GET_CODE (sparc_compare_op0) == SUBREG)
+ && (GET_CODE (sparc_compare_op1) == REG
+ || GET_CODE (sparc_compare_op1) == SUBREG))
+ {
+ tem = sparc_compare_op0;
+ sparc_compare_op0 = sparc_compare_op1;
+ sparc_compare_op1 = tem;
+ emit_insn (gen_sltu (operands[0]));
+ DONE;
+ }
+ }
+ else
+ {
+ if (gen_v9_scc (GTU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sltu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (ltu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (TARGET_V9)
+ {
+ if (gen_v9_scc (LTU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sgeu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (geu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (TARGET_V9)
+ {
+ if (gen_v9_scc (GEU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "sleu"
+ [(set (match_operand:SI 0 "intreg_operand" "")
+ (leu:SI (match_dup 1) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "
+{
+ if (! TARGET_V9)
+ {
+ rtx tem;
+
+ /* We can do geu easily, so if both operands are registers, swap them and
+ do a GEU. */
+ if ((GET_CODE (sparc_compare_op0) == REG
+ || GET_CODE (sparc_compare_op0) == SUBREG)
+ && (GET_CODE (sparc_compare_op1) == REG
+ || GET_CODE (sparc_compare_op1) == SUBREG))
+ {
+ tem = sparc_compare_op0;
+ sparc_compare_op0 = sparc_compare_op1;
+ sparc_compare_op1 = tem;
+ emit_insn (gen_sgeu (operands[0]));
+ DONE;
+ }
+ }
+ else
+ {
+ if (gen_v9_scc (LEU, operands))
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Now the DEFINE_INSNs for the scc cases.
+
+;; The SEQ and SNE patterns are special because they can be done
+;; without any branching and do not involve a COMPARE.
+
+(define_insn "*snesi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_snesi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*snesi_zero_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%%g0\;addx %%g0,0,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*snedi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_snedi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (neg:DI (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,-1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*snedi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (ne:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrnz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*seqsi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_seqsi_zero"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*seqsi_zero_extend"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%%g0\;subx %%g0,-1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "2")])
+
+(define_insn "*seqdi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*neg_seqdi_zero"
+ [(set (match_operand:DI 0 "register_operand" "=&r")
+ (neg:DI (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0))))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,-1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+(define_insn "*seqdi_zero_trunc"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (eq:DI (match_operand:DI 1 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "mov 0,%0\;movrz %1,1,%0"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+;; We can also do (x + (i == 0)) and related, so put them in.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
+
+(define_insn "*x_plus_i_ne_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %2,0,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_minus_i_ne_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (ne:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %2,0,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_plus_i_eq_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;subx %2,-1,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*x_minus_i_eq_0"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 2 "register_operand" "r")
+ (eq:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%%g0\;addx %2,-1,%0"
+ [(set_attr "length" "2")])
+
+;; We can also do GEU and LTU directly, but these operate after a compare.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
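+;;
+;; (Illustration, with example registers: once a "cmp %o0,%o1" has set the
+;; carry bit, *sltu_insn below turns the unsigned "<" result into 0 or 1
+;; with a single "addx %g0,0,%o2".)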
+
+(define_insn "*sltu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ltu:SI (reg:CC 100) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,0,%0"
+ [(set_attr "type" "misc")])
+
+(define_insn "*neg_sltu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,0,%0"
+ [(set_attr "type" "misc")])
+
+;; ??? Combine should canonicalize these next two to the same pattern.
+(define_insn "*neg_sltu_minus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (neg:SI (ltu:SI (reg:CC 100) (const_int 0)))
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*neg_sltu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "arith_operand" "rI"))))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*sgeu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (geu:SI (reg:CC 100) (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "subx %%g0,-1,%0"
+ [(set_attr "type" "misc")])
+
+(define_insn "*neg_sgeu_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,-1,%0"
+ [(set_attr "type" "misc")])
+
+;; We can also do (x + ((unsigned) i >= 0)) and related, so put them in.
+;; ??? The addx/subx insns use the 32 bit carry flag so there are no DImode
+;; versions for v9.
+
+(define_insn "*sltu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "arith_operand" "rI")))]
+ "! TARGET_LIVE_G0"
+ "addx %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*sltu_plus_x_plus_y"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ ""
+ "addx %1,%2,%0")
+
+(define_insn "*x_minus_sltu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "subx %1,0,%0"
+ [(set_attr "type" "unary")])
+
+;; ??? Combine should canonicalize these next two to the same pattern.
+(define_insn "*x_minus_y_minus_sltu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "subx %1,%2,%0")
+
+(define_insn "*x_minus_sltu_plus_y"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 2 "arith_operand" "rI"))))]
+ ""
+ "subx %1,%2,%0")
+
+(define_insn "*sgeu_plus_x"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (geu:SI (reg:CC 100) (const_int 0))
+ (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "subx %1,-1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*x_minus_sgeu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (geu:SI (reg:CC 100) (const_int 0))))]
+ ""
+ "addx %1,-1,%0"
+ [(set_attr "type" "unary")])
+
+;; Now we have the generic scc insns.
+;; !v9: These will be done using a jump.
+;; v9: Use conditional moves which are defined elsewhere.
+;; We have to exclude the cases above, since we do not want combine to
+;; turn something that does not require a jump into something that does.
+
+(define_insn "*scc_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ ""
+ "* return output_scc_insn (operands, insn); "
+ [(set_attr "type" "multi")
+ (set_attr "length" "3")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operator:SI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ ;; 32 bit LTU/GEU are better implemented using addx/subx
+ "TARGET_V9 && REGNO (operands[1]) == SPARC_ICC_REG
+ && (GET_MODE (operands[1]) == CCXmode
+ || (GET_CODE (operands[2]) != LTU && GET_CODE (operands[2]) != GEU))"
+ [(set (match_dup 0) (const_int 0))
+ (set (match_dup 0)
+ (if_then_else:SI (match_op_dup:SI 2 [(match_dup 1) (const_int 0)])
+ (const_int 1)
+ (match_dup 0)))]
+ "")
+
+(define_insn "*scc_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operator:DI 2 "noov_compare_op"
+ [(match_operand 1 "icc_or_fcc_reg_operand" "")
+ (const_int 0)]))]
+ "TARGET_ARCH64"
+ "* return output_scc_insn (operands, insn); "
+ [(set_attr "type" "multi")
+ (set_attr "length" "3")])
+
+;; These control RTL generation for conditional jump insns.
+
+;; The quad-word fp compare library routines all return nonzero to indicate
+;; true, which is different from the equivalent libgcc routines, so we must
+;; handle them specially here.
+
+(define_expand "beq"
+ [(set (pc)
+ (if_then_else (eq (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (EQ, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, EQ);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (EQ, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bne"
+ [(set (pc)
+ (if_then_else (ne (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (NE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, NE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (NE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgt"
+ [(set (pc)
+ (if_then_else (gt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (GT, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgtu"
+ [(set (pc)
+ (if_then_else (gtu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "blt"
+ [(set (pc)
+ (if_then_else (lt (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (LT, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LT);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LT, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bltu"
+ [(set (pc)
+ (if_then_else (ltu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LTU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bge"
+ [(set (pc)
+ (if_then_else (ge (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (GE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, GE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (GE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bgeu"
+ [(set (pc)
+ (if_then_else (geu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (GEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "ble"
+ [(set (pc)
+ (if_then_else (le (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{
+ if (TARGET_ARCH64 && sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode)
+ {
+ emit_v9_brxx_insn (LE, sparc_compare_op0, operands[0]);
+ DONE;
+ }
+ else if (GET_MODE (sparc_compare_op0) == TFmode && ! TARGET_HARD_QUAD)
+ {
+ emit_float_lib_cmp (sparc_compare_op0, sparc_compare_op1, LE);
+ emit_jump_insn (gen_bne (operands[0]));
+ DONE;
+ }
+ operands[1] = gen_compare_reg (LE, sparc_compare_op0, sparc_compare_op1);
+}")
+
+(define_expand "bleu"
+ [(set (pc)
+ (if_then_else (leu (match_dup 1) (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+ "
+{ operands[1] = gen_compare_reg (LEU, sparc_compare_op0, sparc_compare_op1);
+}")
+
+;; Now match both normal and inverted jump.
+
+(define_insn "*normal_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "noov_compare_op"
+ [(reg 100) (const_int 0)])
+ (label_ref (match_operand 1 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[0], 1, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_branch"
+ [(set (pc)
+ (if_then_else (match_operator 0 "noov_compare_op"
+ [(reg 100) (const_int 0)])
+ (pc)
+ (label_ref (match_operand 1 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[0], 1, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*normal_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_fp_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFP 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*normal_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_fpe_branch"
+ [(set (pc)
+ (if_then_else (match_operator 1 "comparison_operator"
+ [(match_operand:CCFPE 0 "fcc_reg_operand" "c")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "*
+{
+ return output_cbranch (operands[1], 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence, insn);
+}"
+ [(set_attr "type" "branch")])
+
+;; Sparc V9-specific jump insns. None of these are guaranteed to be
+;; in the architecture.
+
+;; There are no 32 bit brreg insns.
+
+(define_insn "*normal_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_regcmp_op"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ "TARGET_ARCH64"
+ "*
+{
+ return output_v9branch (operands[0], 1, 2, 0,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence);
+}"
+ [(set_attr "type" "branch")])
+
+(define_insn "*inverted_int_branch_sp64"
+ [(set (pc)
+ (if_then_else (match_operator 0 "v9_regcmp_op"
+ [(match_operand:DI 1 "register_operand" "r")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ "TARGET_ARCH64"
+ "*
+{
+ return output_v9branch (operands[0], 1, 2, 1,
+ final_sequence && INSN_ANNULLED_BRANCH_P (insn),
+ ! final_sequence);
+}"
+ [(set_attr "type" "branch")])
+
+;; Esoteric move insns (lo_sum, high, pic).
+
+(define_insn "*lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "immediate_operand" "in")))]
+ ""
+ ;; V9 needs "add" because of the code models. We still use "or" for v8
+ ;; so we can compare the old compiler with the new.
+ "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
+;; confuse them with real addresses.
+(define_insn "pic_lo_sum_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:SI 2 "immediate_operand" "in")] 0)))]
+ "flag_pic"
+ ;; V9 needs "add" because of the code models. We still use "or" for v8
+ ;; so we can compare the old compiler with the new.
+ "* return TARGET_ARCH64 ? \"add %1,%%lo(%a2),%0\" : \"or %1,%%lo(%a2),%0\";"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; The PIC version of sethi must appear before the non-pic case so that
+;; the unspec will not be matched as part of the operand.
+;; For PIC, symbol_refs are put inside unspec so that the optimizer will not
+;; confuse them with real addresses.
+(define_insn "pic_sethi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
+ "flag_pic && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "pic_lo_sum_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:SI (match_operand:DI 1 "register_operand" "r")
+ (unspec:SI [(match_operand:DI 2 "immediate_operand" "in")] 0)))]
+ "TARGET_ARCH64 && flag_pic"
+ "add %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "pic_sethi_di"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:SI (unspec:SI [(match_operand 1 "" "")] 0)))]
+ "TARGET_ARCH64 && flag_pic && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "get_pc"
+ [(clobber (reg:SI 15))
+ (set (match_operand 0 "register_operand" "=r")
+ (unspec [(match_operand 1 "" "") (match_operand 2 "" "")] 2))]
+ "flag_pic && REGNO (operands[0]) == 23"
+ "sethi %%hi(%a1-4),%0\;call %a2\;add %0,%%lo(%a1+4),%0"
+ [(set_attr "length" "3")])
+
+(define_insn "get_pc_via_rdpc"
+ [(set (match_operand 0 "register_operand" "=r") (pc))]
+ "TARGET_V9"
+ "rd %%pc,%0"
+ [(set_attr "type" "move")])
+
+(define_insn "*sethi_hi"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (high:HI (match_operand 1 "" "")))]
+ "check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+;; This must appear after the PIC sethi so that the PIC unspec will not
+;; be matched as part of the operand.
+(define_insn "*sethi_si"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (high:SI (match_operand 1 "" "")))]
+ "check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*lo_sum_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "immediate_operand" "in")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ /* Don't output a 64 bit constant, since we can't trust the assembler to
+ handle it correctly. */
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && HOST_BITS_PER_WIDE_INT > 32
+ && INTVAL (operands[2]) > 0xffffffff)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
+
+ return \"or %L1,%%lo(%a2),%L0\";
+}"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+;; ??? Optimizer does not handle "or %o1,%lo(0),%o1". How about add?
+
+(define_insn "*lo_sum_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lo_sum:DI (match_operand:DI 1 "register_operand" "0")
+ (match_operand:DI 2 "immediate_operand" "in")))]
+ "TARGET_ARCH64"
+ "*
+{
+ /* Don't output a 64 bit constant, since we can't trust the assembler to
+ handle it correctly. */
+ if (GET_CODE (operands[2]) == CONST_DOUBLE)
+ operands[2] = GEN_INT (CONST_DOUBLE_LOW (operands[2]));
+ else if (GET_CODE (operands[2]) == CONST_INT
+ && HOST_BITS_PER_WIDE_INT > 32
+ && INTVAL (operands[2]) > 0xffffffff)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0xffffffff);
+
+ /* Note that we use add here. This is important because Medium/Anywhere
+ code model support depends on it. */
+ return \"add %1,%%lo(%a2),%0\";
+}"
+ ;; Need to set length for this arith insn because operand2
+ ;; is not an "arith_operand".
+ [(set_attr "length" "1")])
+
+(define_insn "*sethi_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "" "")))]
+ "! TARGET_ARCH64 && check_pic (1)"
+ "*
+{
+ rtx op0 = operands[0];
+ rtx op1 = operands[1];
+
+ if (GET_CODE (op1) == CONST_INT)
+ {
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ if (INTVAL (op1) < 0)
+ return \"mov -1,%0\";
+ else
+ return \"mov 0,%0\";
+ }
+ else if (GET_CODE (op1) == CONST_DOUBLE)
+ {
+ operands[0] = operand_subword (op0, 1, 0, DImode);
+ operands[1] = GEN_INT (CONST_DOUBLE_LOW (op1));
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+
+ operands[0] = operand_subword (op0, 0, 0, DImode);
+ operands[1] = GEN_INT (CONST_DOUBLE_HIGH (op1));
+ return singlemove_string (operands);
+ }
+ else
+ abort ();
+ return \"\";
+}"
+ [(set_attr "type" "move")
+ (set_attr "length" "2")])
+
+;;; ??? This pattern originally clobbered a scratch register. However, this
+;;; is invalid: the movdi pattern may not use a temp register because it
+;;; may be called from reload to reload a DImode value. In that case, we
+;;; end up with a scratch register that never gets allocated. To avoid this,
+;;; we use global register 1 which is never otherwise used by gcc as a temp.
+;;; The correct solution here might be to force DImode constants to memory,
+;;; e.g. by using a toc like the romp and rs6000 ports do for addresses, reg
+;;; 1 will then no longer need to be considered a fixed reg.
+
+(define_expand "sethi_di_sp64"
+ [(parallel
+ [(set (match_operand:DI 0 "register_operand" "")
+ (high:DI (match_operand 1 "general_operand" "")))
+ (clobber (reg:DI 1))])]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*sethi_di_sp64_const"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "const_double_operand" "")))
+ (clobber (reg:DI 1))]
+ "TARGET_ARCH64 && check_pic (1)"
+ "*
+{
+#if HOST_BITS_PER_WIDE_INT == 32
+ rtx high, low;
+
+ split_double (operands[1], &high, &low);
+
+ if (high == const0_rtx)
+ {
+ operands[1] = low;
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ }
+ else
+ {
+ operands[1] = high;
+ output_asm_insn (singlemove_string (operands), operands);
+
+ operands[1] = low;
+ output_asm_insn (\"sllx %0,32,%0\", operands);
+ if (low != const0_rtx)
+ output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
+ }
+#else
+ rtx op = operands[1];
+
+ if (! SPARC_SETHI_P (INTVAL(op)))
+ {
+ operands[1] = GEN_INT (INTVAL (op) >> 32);
+ output_asm_insn (singlemove_string (operands), operands);
+
+ output_asm_insn (\"sllx %0,32,%0\", operands);
+ if (INTVAL (op) & 0xffffffff)
+ {
+ operands[1] = GEN_INT (INTVAL (op) & 0xffffffff);
+ output_asm_insn (\"sethi %%hi(%a1),%%g1; or %0,%%g1,%0\", operands);
+ }
+ }
+ else
+ {
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ }
+#endif
+
+ return \"\";
+}"
+ [(set_attr "type" "move")
+ (set_attr "length" "5")])
+
+;; Most of the required support for the various code models is here.
+;; We can do this because SPARC needs the high insn to load the address. We
+;; just need to get high to do the right thing for each code model. Then each
+;; uses the same "%X+%lo(...)" in the load/store insn, though in the case of
+;; the medium/middle code model "%lo" is written "%l44".
+
+;; When TARGET_CM_MEDLOW, assume that the upper 32 bits of symbol addresses are
+;; always 0.
+;; When TARGET_CM_MEDMID, the executable must be in the low 16 TB of memory.
+;; This corresponds to the low 44 bits, and the %[hml]44 relocs are used.
+;; ??? Not implemented yet.
+;; When TARGET_CM_EMBMEDANY, the text and data segments have a maximum size of
+;; 31 bits and may be located anywhere. EMBMEDANY_BASE_REG contains the start
+;; address of the data segment, currently %g4.
+;; When TARGET_CM_MEDANY, the text and data segments have a maximum size of 31
+;; bits and may be located anywhere. The maximum offset from any instruction
+;; to the label _GLOBAL_OFFSET_TABLE_ is 31 bits.
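+;; For example, under TARGET_CM_MEDLOW a load from a global `var' (name and
+;; registers illustrative) needs only the usual 32 bit pair, since the upper
+;; bits of the address are known to be zero:
+;;	sethi	%hi(var),%o1
+;;	ldx	[%o1+%lo(var)],%o0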
+
+(define_insn "*sethi_di_medlow"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_MEDLOW && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+(define_insn "*sethi_di_medium_pic"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "sp64_medium_pic_operand" "")))]
+ "(TARGET_CM_MEDLOW || TARGET_CM_EMBMEDANY) && check_pic (1)"
+ "sethi %%hi(%a1),%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "1")])
+
+;; WARNING: %0 gets %hi(%1)+%g4.
+;; You cannot OR in %lo(%1), it must be added in.
+
+(define_insn "*sethi_di_embmedany_data"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "data_segment_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi %%hi(%a1),%0; add %0,%_,%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "2")])
+
+(define_insn "*sethi_di_embmedany_text"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (high:DI (match_operand 1 "text_segment_operand" "")))
+ ;; The clobber is here because emit_move_sequence assumes the worst case.
+ (clobber (reg:DI 1))]
+ "TARGET_CM_EMBMEDANY && check_pic (1)"
+ "sethi %%uhi(%a1),%%g1; or %%g1,%%ulo(%a1),%%g1; sllx %%g1,32,%%g1; sethi %%hi(%a1),%0; or %0,%%g1,%0"
+ [(set_attr "type" "move")
+ (set_attr "length" "5")])
+
+;; Move instructions
+
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, QImode))
+ DONE;
+}")
+
+(define_insn "*movqi_insn"
+ [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
+ (match_operand:QI 1 "move_operand" "rI,K,Q,rJ"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)
+ || operands[1] == const0_rtx)"
+ "@
+ mov %1,%0
+ sethi %%hi(%a1),%0
+ ldub %1,%0
+ stb %r1,%0"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "1")])
+
+(define_insn "*movqi_insn_liveg0"
+ [(set (match_operand:QI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
+ (match_operand:QI 1 "move_operand" "r,J,I,K,Q,r"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ sethi %%hi(%a1),%0
+ ldub %1,%0
+ stb %1,%0"
+ [(set_attr "type" "move,move,move,move,load,store")
+ (set_attr "length" "1,1,2,1,1,1")])
+
+(define_insn "*lo_sum_qi"
+ [(set (match_operand:QI 0 "register_operand" "=r")
+ (subreg:QI (lo_sum:SI (match_operand:QI 1 "register_operand" "r")
+ (match_operand 2 "immediate_operand" "in")) 0))]
+ ""
+ "or %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "*store_qi"
+ [(set (mem:QI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:QI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;stb %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, HImode))
+ DONE;
+}")
+
+(define_insn "*movhi_insn"
+ [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q")
+ (match_operand:HI 1 "move_operand" "rI,K,Q,rJ"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode)
+ || operands[1] == const0_rtx)"
+ "@
+ mov %1,%0
+ sethi %%hi(%a1),%0
+ lduh %1,%0
+ sth %r1,%0"
+ [(set_attr "type" "move,move,load,store")
+ (set_attr "length" "1")])
+
+(define_insn "*movhi_insn_liveg0"
+ [(set (match_operand:HI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,r,r,Q")
+ (match_operand:HI 1 "move_operand" "r,J,I,K,Q,r"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ sethi %%hi(%a1),%0
+ lduh %1,%0
+ sth %1,%0"
+ [(set_attr "type" "move,move,move,move,load,store")
+ (set_attr "length" "1,1,2,1,1,1")])
+
+(define_insn "*lo_sum_hi"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (lo_sum:HI (match_operand:HI 1 "register_operand" "r")
+ (match_operand 2 "immediate_operand" "in")))]
+ ""
+ "or %1,%%lo(%a2),%0"
+ [(set_attr "length" "1")])
+
+(define_insn "*store_hi"
+ [(set (mem:HI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:HI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;sth %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SImode))
+ DONE;
+}")
+
+;; We must support both 'r' and 'f' registers here, because combine may
+;; convert SFmode hard registers to SImode hard registers when simplifying
+;; subreg sets.
+
+;; We cannot combine the similar 'r' and 'f' constraints, because it causes
+;; problems with register allocation. Reload might try to put an integer
+;; in an fp register, or an fp number in an integer register.
+
+(define_insn "*movsi_insn"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,f,r,r,f,Q,Q,d")
+ (match_operand:SI 1 "move_operand" "rI,!f,K,Q,!Q,rJ,!f,J"))]
+ "! TARGET_LIVE_G0
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode)
+ || operands[1] == const0_rtx)
+ && (GET_CODE (operands[0]) != REG || ! CONSTANT_P (operands[1])
+ || REGNO (operands[0]) < 32
+ || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)"
+ "@
+ mov %1,%0
+ fmovs %1,%0
+ sethi %%hi(%a1),%0
+ ld %1,%0
+ ld %1,%0
+ st %r1,%0
+ st %1,%0
+ fzeros %0"
+ [(set_attr "type" "move,fpmove,move,load,fpload,store,fpstore,fpmove")
+ (set_attr "length" "1")])
+
+(define_insn "*movsi_insn_liveg0"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,f,r,r,f,Q,Q")
+ (match_operand:SI 1 "move_operand" "r,J,I,!f,K,Q,!Q,r,!f"))]
+ "TARGET_LIVE_G0
+ && (register_operand (operands[0], SImode)
+ || register_operand (operands[1], SImode))"
+ "@
+ mov %1,%0
+ and %0,0,%0
+ and %0,0,%0\;or %0,%1,%0
+ fmovs %1,%0
+ sethi %%hi(%a1),%0
+ ld %1,%0
+ ld %1,%0
+ st %1,%0
+ st %1,%0"
+ [(set_attr "type" "move,move,move,fpmove,move,load,fpload,store,fpstore")
+ (set_attr "length" "1,1,2,1,1,1,1,1,1")])
+
+(define_insn "*store_si"
+ [(set (mem:SI (match_operand:SI 0 "symbolic_operand" ""))
+ (match_operand:SI 1 "reg_or_0_operand" "rJ"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+(define_expand "movdi"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "")
+ (match_operand:DI 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DImode))
+ DONE;
+}")
+
+;; 32 bit V9 movdi is like regular 32 bit except: a 64 bit zero can be stored
+;; to aligned memory with a single instruction, the ldd/std instructions
+;; are not used, and constants cannot be moved to floating point registers.
+
+(define_insn "*movdi_sp32_v9"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,Q,r,r,?e,?e,?Q,?b")
+ (match_operand:DI 1 "general_operand" "r,J,r,Q,i,e,Q,e,J"))]
+ "TARGET_V9 && ! TARGET_ARCH64
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)
+ && (GET_CODE (operands[0]) != REG || ! CONSTANT_P (operands[1])
+ || REGNO (operands[0]) < 32
+ || REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER)"
+ "*
+{
+ if (which_alternative == 1)
+ return \"stx %%g0,%0\";
+ if (which_alternative == 8)
+ return \"fzero %0\";
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "move,store,store,load,multi,fp,fpload,fpstore,fpmove")
+ (set_attr "length" "2,1,3,3,3,2,3,3,1")])
+
+;; SPARC V9 deprecates std. Split it here.
+(define_split
+ [(set (match_operand:DI 0 "memory_operand" "=m")
+ (match_operand:DI 1 "register_operand" "r"))]
+ "TARGET_V9 && ! TARGET_ARCH64 && reload_completed
+ && REGNO (operands[1]) < 32 && ! MEM_VOLATILE_P (operands[0])
+ && offsettable_memref_p (operands[0])"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "operands[3] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[1]);
+ operands[4] = adj_offsettable_operand (operands[0], 4);
+ PUT_MODE (operands[4], SImode);
+ operands[2] = copy_rtx (operands[0]);
+ PUT_MODE (operands[2], SImode);")
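+
+;; For example (operands illustrative), the split turns
+;;	std	%o0,[%fp-8]
+;; into the pair
+;;	st	%o0,[%fp-8]		! high word (big endian)
+;;	st	%o1,[%fp-4]		! low word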
+
+;; Split register to register moves.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "arith_double_operand" "rIN"))]
+ "! TARGET_ARCH64
+ && REGNO (operands[0]) < 32
+ && GET_CODE (operands[1]) == REG && REGNO (operands[1]) < 32
+ && ! reg_overlap_mentioned_p (operands[0], operands[1])"
+ [(set (match_dup 2) (match_dup 4))
+ (set (match_dup 3) (match_dup 5))]
+ "operands[2] = gen_highpart (SImode, operands[0]);
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[1]);")
+
+(define_insn "*movdi_sp32"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,T,U,Q,r,r,?f,?f,?Q")
+ (match_operand:DI 1 "general_operand" "r,U,T,r,Q,i,f,Q,f"))]
+ "! TARGET_V9
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "move,store,load,store,load,multi,fp,fpload,fpstore")
+ (set_attr "length" "2,1,1,3,3,3,2,3,3")])
+
+;;; ??? The trick used below can be extended to load any negative 32 bit
+;;; constant in two instructions. Currently the compiler will use HIGH/LO_SUM
+;;; for anything not matching the HIK constraints, which results in 5
+;;; instructions. Positive 32 bit constants can be loaded in the obvious way
+;;; with sethi/or. To extend the trick, in the xor instruction, use
+;;; xor %o0, ((op1 & 0x3ff) | -0x400), %o0
+;;; This needs the original value of operands[1], not the inverted value.
+
+(define_insn "*movdi_sp64_insn"
+ [(set (match_operand:DI 0 "reg_or_nonsymb_mem_operand" "=r,r,r,Q,?e,?e,?Q")
+ (match_operand:DI 1 "move_operand" "rI,K,Q,rJ,e,Q,e"))]
+ "TARGET_ARCH64
+ && (register_operand (operands[0], DImode)
+ || register_operand (operands[1], DImode)
+ || operands[1] == const0_rtx)"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"mov %1,%0\";
+ case 1:
+ /* Sethi does not sign extend, so we must use a little trickery
+ to use it for negative numbers. Invert the constant before
+ loading it in, then use a xor immediate to invert the loaded bits
+ (along with the upper 32 bits) to the desired constant. This
+ works because the sethi and immediate fields overlap. */
+
+ if ((INTVAL (operands[1]) & 0x80000000) == 0)
+ return \"sethi %%hi(%a1),%0\";
+ else
+ {
+ operands[1] = GEN_INT (~INTVAL (operands[1]));
+ output_asm_insn (\"sethi %%hi(%a1),%0\", operands);
+ /* The low 10 bits are already zero, but invert the rest.
+ Assemblers don't accept 0x1c00, so use -0x400 instead. */
+ return \"xor %0,-0x400,%0\";
+ }
+ case 2:
+ return \"ldx %1,%0\";
+ case 3:
+ return \"stx %r1,%0\";
+ case 4:
+ return \"fmovd %1,%0\";
+ case 5:
+ return \"ldd %1,%0\";
+ case 6:
+ return \"std %1,%0\";
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "move,move,load,store,fp,fpload,fpstore")
+ (set_attr "length" "1,2,1,1,1,1,1")])
+
+;; ??? There's no symbolic (set (mem:DI ...) ...).
+;; Experimentation with v9 suggested one isn't needed.
+
+;; Floating point move insns
+
+;; This pattern forces (set (reg:SF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movsf pattern.
+(define_insn "*movsf_const_insn"
+ [(set (match_operand:SF 0 "general_operand" "=f,d,m,?r")
+ (match_operand:SF 1 "" "m,G,G,?F"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return \"ld %1,%0\";
+ case 1:
+ return \"fzeros %0\";
+ case 2:
+ return \"st %%g0,%0\";
+ case 3:
+ return singlemove_string (operands);
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "fpload,fpmove,store,load")
+ (set_attr "length" "1,1,1,2")])
+
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, SFmode))
+ DONE;
+}")
+
+(define_insn "*movsf_insn"
+ [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=f,f,Q,r,r,Q")
+ (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "f,Q,f,r,Q,r"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ fmovs %1,%0
+ ld %1,%0
+ st %1,%0
+ mov %1,%0
+ ld %1,%0
+ st %1,%0"
+ [(set_attr "type" "fpmove,fpload,fpstore,move,load,store")])
+
+;; Exactly the same as above, except that all `f' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `f' reg
+;; when -mno-fpu.
+
+(define_insn "*movsf_no_f_insn"
+ [(set (match_operand:SF 0 "reg_or_nonsymb_mem_operand" "=r,r,Q")
+ (match_operand:SF 1 "reg_or_nonsymb_mem_operand" "r,Q,r"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ mov %1,%0
+ ld %1,%0
+ st %1,%0"
+ [(set_attr "type" "move,load,store")])
+
+(define_insn "*store_sf"
+ [(set (mem:SF (match_operand:SI 0 "symbolic_operand" "i"))
+ (match_operand:SF 1 "reg_or_0_operand" "rfG"))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "sethi %%hi(%a0),%2\;st %r1,[%2+%%lo(%a0)]"
+ [(set_attr "type" "store")
+ (set_attr "length" "2")])
+
+;; This pattern forces (set (reg:DF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movdf pattern.
+
+(define_insn "*movdf_const_insn"
+ [(set (match_operand:DF 0 "general_operand" "=?r,e,o,d")
+ (match_operand:DF 1 "" "?F,m,G,G"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_move_double (operands);
+ case 1:
+ return output_fp_move_double (operands);
+ case 2:
+ if (TARGET_ARCH64 || (TARGET_V9 && mem_aligned_8 (operands[0])))
+ {
+ return \"stx %%g0,%0\";
+ }
+ else
+ {
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ return \"st %%g0,%0\;st %%g0,%1\";
+ }
+ case 3:
+ return \"fzero %0\";
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "load,fpload,store,fpmove")
+ (set_attr "length" "3,3,3,1")])
+
+(define_expand "movdf"
+ [(set (match_operand:DF 0 "general_operand" "")
+ (match_operand:DF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, DFmode))
+ DONE;
+}")
+
+(define_insn "*movdf_insn"
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=e,Q,e,T,U,r,Q,r")
+ (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "e,e,Q,U,T,r,r,Q"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_double (operands);
+ return output_move_double (operands);
+}"
+ [(set_attr "type" "fp,fpstore,fpload,fpstore,fpload,move,store,load")
+ (set_attr "length" "2,3,3,1,1,2,2,2")])
+
+;; Exactly the same as above, except that all `e' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use a `e' reg
+;; when -mno-fpu.
+
+(define_insn "*movdf_no_e_insn"
+ [(set (match_operand:DF 0 "reg_or_nonsymb_mem_operand" "=T,U,r,Q,&r")
+ (match_operand:DF 1 "reg_or_nonsymb_mem_operand" "U,T,r,r,Q"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], DFmode)
+ || register_operand (operands[1], DFmode))"
+ "* return output_move_double (operands);"
+ [(set_attr "type" "store,load,move,store,load")
+ (set_attr "length" "1,1,2,3,3")])
+
+;; Must handle overlapping registers here, since parameters can be unaligned
+;; in registers.
+
+(define_split
+ [(set (match_operand:DF 0 "register_operand" "")
+ (match_operand:DF 1 "register_operand" ""))]
+ "! TARGET_ARCH64 && reload_completed
+ && REGNO (operands[0]) < SPARC_FIRST_V9_FP_REG
+ && REGNO (operands[1]) < SPARC_FIRST_V9_FP_REG"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+ "
+{
+ rtx first_set = operand_subword (operands[0], 0, 0, DFmode);
+ rtx second_use = operand_subword (operands[1], 1, 0, DFmode);
+
+ if (REGNO (first_set) == REGNO (second_use))
+ {
+ operands[2] = operand_subword (operands[0], 1, 0, DFmode);
+ operands[3] = second_use;
+ operands[4] = first_set;
+ operands[5] = operand_subword (operands[1], 0, 0, DFmode);
+ }
+ else
+ {
+ operands[2] = first_set;
+ operands[3] = operand_subword (operands[1], 0, 0, DFmode);
+ operands[4] = operand_subword (operands[0], 1, 0, DFmode);
+ operands[5] = second_use;
+ }
+}")
+
+(define_insn "*store_df"
+ [(set (mem:DF (match_operand:SI 0 "symbolic_operand" "i,i"))
+ (match_operand:DF 1 "reg_or_0_operand" "re,G"))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "(reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "*
+{
+ output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
+ if (which_alternative == 0)
+ return \"std %1,[%2+%%lo(%a0)]\";
+ else
+ return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\";
+}"
+ [(set_attr "type" "store")
+ (set_attr "length" "3")])
+
+;; This pattern forces (set (reg:TF ...) (const_double ...))
+;; to be reloaded by putting the constant into memory.
+;; It must come before the more general movtf pattern.
+(define_insn "*movtf_const_insn"
+ [(set (match_operand:TF 0 "general_operand" "=?r,e,o")
+ (match_operand:TF 1 "" "?F,m,G"))]
+ "TARGET_FPU
+ && GET_CODE (operands[1]) == CONST_DOUBLE
+ && (GET_CODE (operands[0]) == REG
+ || fp_zero_operand (operands[1]))"
+ "*
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return output_move_quad (operands);
+ case 1:
+ return output_fp_move_quad (operands);
+ case 2:
+ if (TARGET_ARCH64 || (TARGET_V9 && mem_aligned_8 (operands[0])))
+ {
+ operands[1] = adj_offsettable_operand (operands[0], 8);
+ return \"stx %%g0,%0\;stx %%g0,%1\";
+ }
+ else
+ {
+ /* ??? Do we run off the end of the array here? */
+ operands[1] = adj_offsettable_operand (operands[0], 4);
+ operands[2] = adj_offsettable_operand (operands[0], 8);
+ operands[3] = adj_offsettable_operand (operands[0], 12);
+ return \"st %%g0,%0\;st %%g0,%1\;st %%g0,%2\;st %%g0,%3\";
+ }
+ default:
+ abort ();
+ }
+}"
+ [(set_attr "type" "load,fpload,store")
+ (set_attr "length" "5,5,5")])
+
+(define_expand "movtf"
+ [(set (match_operand:TF 0 "general_operand" "")
+ (match_operand:TF 1 "general_operand" ""))]
+ ""
+ "
+{
+ if (emit_move_sequence (operands, TFmode))
+ DONE;
+}")
+
+(define_insn "*movtf_insn"
+ [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=e,Q,e,r,Q,r")
+ (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "e,e,Q,r,r,Q"))]
+ "TARGET_FPU
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_quad (operands);
+ return output_move_quad (operands);
+}"
+ [(set_attr "type" "fp,fpstore,fpload,move,store,load")
+ (set_attr "length" "5,4,4,5,4,4")])
+
+;; Exactly the same as above, except that all `e' cases are deleted.
+;; This is necessary to prevent reload from ever trying to use an `e' reg
+;; when -mno-fpu.
+
+(define_insn "*movtf_no_e_insn"
+ [(set (match_operand:TF 0 "reg_or_nonsymb_mem_operand" "=r,Q,&r")
+ (match_operand:TF 1 "reg_or_nonsymb_mem_operand" "r,r,Q"))]
+ "! TARGET_FPU
+ && (register_operand (operands[0], TFmode)
+ || register_operand (operands[1], TFmode))"
+ "*
+{
+ if (FP_REG_P (operands[0]) || FP_REG_P (operands[1]))
+ return output_fp_move_quad (operands);
+ return output_move_quad (operands);
+}"
+ [(set_attr "type" "move,store,load")
+ (set_attr "length" "4,5,5")])
+
+;; This is disabled because it does not work. Long doubles have only 8
+;; byte alignment. Adding an offset of 8 or 12 to an 8 byte aligned %lo may
+;; cause it to overflow. See also GO_IF_LEGITIMATE_ADDRESS.
+(define_insn "*store_tf"
+ [(set (mem:TF (match_operand:SI 0 "symbolic_operand" "i,i"))
+ (match_operand:TF 1 "reg_or_0_operand" "re,G"))
+ (clobber (match_scratch:SI 2 "=&r,&r"))]
+ "0 && (reload_completed || reload_in_progress)
+ && ! TARGET_PTR64"
+ "*
+{
+ output_asm_insn (\"sethi %%hi(%a0),%2\", operands);
+ if (which_alternative == 0)
+ return \"std %1,[%2+%%lo(%a0)]\;std %S1,[%2+%%lo(%a0+8)]\";
+ else
+ return \"st %%g0,[%2+%%lo(%a0)]\;st %%g0,[%2+%%lo(%a0+4)]\; st %%g0,[%2+%%lo(%a0+8)]\;st %%g0,[%2+%%lo(%a0+12)]\";
+}"
+ [(set_attr "type" "store")
+ (set_attr "length" "5")])
+
+;; Sparc V9 conditional move instructions.
+
+;; We can handle larger constants here for some flavors, but for now we keep
+;; it simple and only allow those constants supported by all flavors.
+;; Note that emit_conditional_move canonicalizes operands 2,3 so that operand
+;; 3 contains the constant if one is present, but we handle either for
+;; generality (sparc.c puts a constant in operand 2).
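+;; For example (registers illustrative), `x = (a == b) ? y : x' can emit
+;;	cmp	%o1,%o2			! set the integer condition codes
+;;	move	%icc,%o3,%o0		! %o0 = %o3 if equal, else unchanged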
+
+(define_expand "movqicc"
+ [(set (match_operand:QI 0 "register_operand" "")
+ (if_then_else:QI (match_operand 1 "comparison_operator" "")
+ (match_operand:QI 2 "arith10_operand" "")
+ (match_operand:QI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movhicc"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (if_then_else:HI (match_operand 1 "comparison_operator" "")
+ (match_operand:HI 2 "arith10_operand" "")
+ (match_operand:HI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movsicc"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI (match_operand 1 "comparison_operator" "")
+ (match_operand:SI 2 "arith10_operand" "")
+ (match_operand:SI 3 "arith10_operand" "")))]
+ "TARGET_V9"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+ enum machine_mode op0_mode = GET_MODE (sparc_compare_op0);
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && ((TARGET_ARCH64 && op0_mode == DImode && v9_regcmp_p (code))
+ || (op0_mode == SImode && v8plus_regcmp_p (code))))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, op0_mode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movdicc"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (if_then_else:DI (match_operand 1 "comparison_operator" "")
+ (match_operand:DI 2 "arith10_double_operand" "")
+ (match_operand:DI 3 "arith10_double_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg),
+ cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movsfcc"
+ [(set (match_operand:SF 0 "register_operand" "")
+ (if_then_else:SF (match_operand 1 "comparison_operator" "")
+ (match_operand:SF 2 "register_operand" "")
+ (match_operand:SF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movdfcc"
+ [(set (match_operand:DF 0 "register_operand" "")
+ (if_then_else:DF (match_operand 1 "comparison_operator" "")
+ (match_operand:DF 2 "register_operand" "")
+ (match_operand:DF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+(define_expand "movtfcc"
+ [(set (match_operand:TF 0 "register_operand" "")
+ (if_then_else:TF (match_operand 1 "comparison_operator" "")
+ (match_operand:TF 2 "register_operand" "")
+ (match_operand:TF 3 "register_operand" "")))]
+ "TARGET_V9 && TARGET_FPU"
+ "
+{
+ enum rtx_code code = GET_CODE (operands[1]);
+
+ if (GET_MODE (sparc_compare_op0) == DImode
+ && ! TARGET_ARCH64)
+ FAIL;
+
+ if (sparc_compare_op1 == const0_rtx
+ && GET_CODE (sparc_compare_op0) == REG
+ && GET_MODE (sparc_compare_op0) == DImode
+ && v9_regcmp_p (code))
+ {
+ operands[1] = gen_rtx_fmt_ee (code, DImode,
+ sparc_compare_op0, sparc_compare_op1);
+ }
+ else
+ {
+ rtx cc_reg = gen_compare_reg (code,
+ sparc_compare_op0, sparc_compare_op1);
+ operands[1] = gen_rtx_fmt_ee (code, GET_MODE (cc_reg), cc_reg, const0_rtx);
+ }
+}")
+
+;; Conditional move define_insns.
+
+(define_insn "*movqi_cc_sp64"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:QI 3 "arith11_operand" "rL,0")
+ (match_operand:QI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movhi_cc_sp64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:HI 3 "arith11_operand" "rL,0")
+ (match_operand:HI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsi_cc_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:SI 3 "arith11_operand" "rL,0")
+ (match_operand:SI 4 "arith11_operand" "0,rL")))]
+ "TARGET_V9"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+;; ??? The constraints of operands 3,4 need work.
+(define_insn "*movdi_cc_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:DI 3 "arith11_double_operand" "rLH,0")
+ (match_operand:DI 4 "arith11_double_operand" "0,rLH")))]
+ "TARGET_ARCH64"
+ "@
+ mov%C1 %x2,%3,%0
+ mov%c1 %x2,%4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsf_cc_sp64"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
+ "TARGET_V9 && TARGET_FPU"
+ "@
+ fmovs%C1 %x2,%3,%0
+ fmovs%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movdf_cc_sp64"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (if_then_else:DF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "@
+ fmovd%C1 %x2,%3,%0
+ fmovd%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movtf_cc_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "comparison_operator"
+ [(match_operand 2 "icc_or_fcc_reg_operand" "X,X")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "@
+ fmovq%C1 %x2,%3,%0
+ fmovq%c1 %x2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movqi_cc_reg_sp64"
+ [(set (match_operand:QI 0 "register_operand" "=r,r")
+ (if_then_else:QI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:QI 3 "arith10_operand" "rM,0")
+ (match_operand:QI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movhi_cc_reg_sp64"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (if_then_else:HI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:HI 3 "arith10_operand" "rM,0")
+ (match_operand:HI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsi_cc_reg_sp64"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+;; On UltraSPARC this is slightly worse than cmp/mov %icc if the register
+;; needs to be zero extended, but better on average.
+(define_insn "*movsi_cc_reg_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "TARGET_V9"
+ "*
+{
+ if (! sparc_check_64 (operands[2], insn))
+ output_asm_insn (\"srl %2,0,%2\", operands);
+ if (which_alternative == 0)
+ return \"movr%D1 %2,%r3,%0\";
+ return \"movr%d1 %2,%r4,%0\";
+}"
+ [(set_attr "type" "cmove")
+ (set_attr "length" "2")])
+
+;; To work well this needs to know the current insn, but that is not an
+;; argument to gen_split_*.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (if_then_else:SI (match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")))]
+ "reload_completed"
+ [(set (match_dup 0)
+ (unspec:SI [(match_dup 1) (match_dup 3) (match_dup 4)] 9))]
+ "if (! sparc_check_64 (operands[2], NULL_RTX))
+ emit_insn (gen_v8plus_clear_high (operands[2], operands[2]));")
+
+;; A conditional move with the condition argument known to be zero extended.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (unspec:SI [(match_operator 1 "v8plus_regcmp_op"
+ [(match_operand:SI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SI 3 "arith10_operand" "rM,0")
+ (match_operand:SI 4 "arith10_operand" "0,rM")] 9))]
+ "TARGET_V9"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+;; ??? The constraints of operands 3,4 need work.
+(define_insn "*movdi_cc_reg_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (if_then_else:DI (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:DI 3 "arith10_double_operand" "rMH,0")
+ (match_operand:DI 4 "arith10_double_operand" "0,rMH")))]
+ "TARGET_ARCH64"
+ "@
+ movr%D1 %2,%r3,%0
+ movr%d1 %2,%r4,%0"
+ [(set_attr "type" "cmove")])
+
+(define_insn "*movsf_cc_reg_sp64"
+ [(set (match_operand:SF 0 "register_operand" "=f,f")
+ (if_then_else:SF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:SF 3 "register_operand" "f,0")
+ (match_operand:SF 4 "register_operand" "0,f")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrs%D1 %2,%3,%0
+ fmovrs%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movdf_cc_reg_sp64"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (if_then_else:DF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:DF 3 "register_operand" "e,0")
+ (match_operand:DF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrd%D1 %2,%3,%0
+ fmovrd%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+(define_insn "*movtf_cc_reg_sp64"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (if_then_else:TF (match_operator 1 "v9_regcmp_op"
+ [(match_operand:DI 2 "register_operand" "r,r")
+ (const_int 0)])
+ (match_operand:TF 3 "register_operand" "e,0")
+ (match_operand:TF 4 "register_operand" "0,e")))]
+ "TARGET_ARCH64 && TARGET_FPU"
+ "@
+ fmovrq%D1 %2,%3,%0
+ fmovrq%d1 %2,%4,%0"
+ [(set_attr "type" "fpcmove")])
+
+;;- zero extension instructions
+
+;; These patterns originally accepted general_operands; however, slightly
+;; better code is generated by only accepting register_operands, and then
+;; letting combine generate the ldu[hb] insns.
+
+(define_expand "zero_extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = GEN_INT (16);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_16));
+ emit_insn (gen_lshrsi3 (operand0, temp, shift_16));
+ DONE;
+}")
+
+(define_insn "*zero_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "lduh %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "zero_extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (zero_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*zero_extendqihi2_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r,r")
+ (zero_extend:HI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (zero_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*zero_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (zero_extend:SI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:QI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*zero_extendqidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:QI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
+ "@
+ and %1,0xff,%0
+ ldub %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+(define_expand "zero_extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:HI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_48 = GEN_INT (48);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_48));
+ emit_insn (gen_lshrdi3 (operand0, temp, shift_48));
+ DONE;
+}")
+
+(define_insn "*zero_extendhidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (zero_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "lduh %1,%0"
+ [(set_attr "type" "load")])
+
+
+;; ??? Write truncdisi pattern using sra?
+
+(define_expand "zero_extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (zero_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*zero_extendsidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (zero_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64 && GET_CODE (operands[1]) != CONST_INT"
+ "@
+ srl %1,0,%0
+ lduw %1,%0"
+ [(set_attr "type" "unary,load")
+ (set_attr "length" "1")])
+
+;; Zero extend a 32 bit value in a 64 bit register.
+(define_insn "v8plus_clear_high"
+ [(set (match_operand:SI 0 "reg_or_nonsymb_mem_operand" "=r,Q")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "r,r")] 10))]
+ "TARGET_V9"
+ "*
+if (which_alternative == 1)
+ return \"st %1,%0\";
+if (sparc_check_64 (operands[1], insn) > 0)
+ return final_sequence ? \"nop\" : \"\";
+return \"srl %1,0,%0\";
+"
+ [(set_attr "type" "shift,store")])
+
+;; Simplify comparisons of extended values.
+
+(define_insn "*cmp_zero_extendqisi2"
+ [(set (reg:CC 100)
+ (compare:CC (zero_extend:SI (match_operand:QI 0 "register_operand" "r"))
+ (const_int 0)))]
+ ""
+ "andcc %0,0xff,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_zero_extendqisi2_set"
+ [(set (reg:CC 100)
+ (compare:CC (zero_extend:SI (match_operand:QI 1 "register_operand" "r"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extend:SI (match_dup 1)))]
+ ""
+ "andcc %1,0xff,%0"
+ [(set_attr "type" "unary")])
+
+;; Similarly, handle SI->QI mode truncation followed by a compare.
+
+(define_insn "*cmp_siqi_trunc"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:SI 0 "register_operand" "r") 0)
+ (const_int 0)))]
+ ""
+ "andcc %0,0xff,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_siqi_trunc_set"
+ [(set (reg:CC 100)
+ (compare:CC (subreg:QI (match_operand:SI 1 "register_operand" "r") 0)
+ (const_int 0)))
+ (set (match_operand:QI 0 "register_operand" "=r")
+ (match_dup 1))]
+ ""
+ "andcc %1,0xff,%0"
+ [(set_attr "type" "unary")])
+
+;;- sign extension instructions
+
+;; These patterns originally accepted general_operands; however, slightly
+;; better code is generated by only accepting register_operands, and then
+;; letting combine generate the lds[hb] insns.
+
+(define_expand "extendhisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:HI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_16 = GEN_INT (16);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_16));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_16));
+ DONE;
+}")
+
+(define_insn "*sign_extendhisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "memory_operand" "m")))]
+ ""
+ "ldsh %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqihi2"
+ [(set (match_operand:HI 0 "register_operand" "")
+ (sign_extend:HI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = GEN_INT (24);
+ int op1_subword = 0;
+ int op0_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+ if (GET_CODE (operand0) == SUBREG)
+ {
+ op0_subword = SUBREG_WORD (operand0);
+ operand0 = XEXP (operand0, 0);
+ }
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_24));
+ if (GET_MODE (operand0) != SImode)
+ operand0 = gen_rtx_SUBREG (SImode, operand0, op0_subword);
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+}")
+
+(define_insn "*sign_extendqihi2_insn"
+ [(set (match_operand:HI 0 "register_operand" "=r")
+ (sign_extend:HI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqisi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (sign_extend:SI (match_operand:QI 1 "register_operand" "")))]
+ ""
+ "
+{
+ rtx temp = gen_reg_rtx (SImode);
+ rtx shift_24 = GEN_INT (24);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashlsi3 (temp, gen_rtx_SUBREG (SImode, operand1,
+ op1_subword),
+ shift_24));
+ emit_insn (gen_ashrsi3 (operand0, temp, shift_24));
+ DONE;
+}")
+
+(define_insn "*sign_extendqisi2_insn"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "memory_operand" "m")))]
+ ""
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendqidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:QI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_56 = GEN_INT (56);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_56));
+ emit_insn (gen_ashrdi3 (operand0, temp, shift_56));
+ DONE;
+}")
+
+(define_insn "*sign_extendqidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:QI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "ldsb %1,%0"
+ [(set_attr "type" "sload")])
+
+(define_expand "extendhidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:HI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "
+{
+ rtx temp = gen_reg_rtx (DImode);
+ rtx shift_48 = GEN_INT (48);
+ int op1_subword = 0;
+
+ if (GET_CODE (operand1) == SUBREG)
+ {
+ op1_subword = SUBREG_WORD (operand1);
+ operand1 = XEXP (operand1, 0);
+ }
+
+ emit_insn (gen_ashldi3 (temp, gen_rtx_SUBREG (DImode, operand1,
+ op1_subword),
+ shift_48));
+ emit_insn (gen_ashrdi3 (operand0, temp, shift_48));
+ DONE;
+}")
+
+(define_insn "*sign_extendhidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (sign_extend:DI (match_operand:HI 1 "memory_operand" "m")))]
+ "TARGET_ARCH64"
+ "ldsh %1,%0"
+ [(set_attr "type" "load")])
+
+(define_expand "extendsidi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (sign_extend:DI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_ARCH64"
+ "")
+
+(define_insn "*sign_extendsidi2_insn"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (sign_extend:DI (match_operand:SI 1 "sparc_operand" "r,Q")))]
+ "TARGET_ARCH64"
+ "@
+ sra %1,0,%0
+ ldsw %1,%0"
+ [(set_attr "type" "unary,sload")
+ (set_attr "length" "1")])
+
+;; Special pattern for optimizing bit-field compares. This is needed
+;; because combine uses this as a canonical form.
+
+(define_insn "*cmp_zero_extract"
+ [(set (reg:CC 100)
+ (compare:CC
+ (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "small_int" "n")
+ (match_operand:SI 2 "small_int" "n"))
+ (const_int 0)))]
+ "INTVAL (operands[2]) > 19"
+ "*
+{
+ int len = INTVAL (operands[1]);
+ int pos = 32 - INTVAL (operands[2]) - len;
+ unsigned mask = ((1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc %0,%1,%%g0\";
+}")
+
+(define_insn "*cmp_zero_extract_sp64"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (zero_extract:DI (match_operand:DI 0 "register_operand" "r")
+ (match_operand:SI 1 "small_int" "n")
+ (match_operand:SI 2 "small_int" "n"))
+ (const_int 0)))]
+ "TARGET_ARCH64 && INTVAL (operands[2]) > 51"
+ "*
+{
+ int len = INTVAL (operands[1]);
+ int pos = 64 - INTVAL (operands[2]) - len;
+ unsigned HOST_WIDE_INT mask = (((unsigned HOST_WIDE_INT) 1 << len) - 1) << pos;
+
+ operands[1] = GEN_INT (mask);
+ return \"andcc %0,%1,%%g0\";
+}")
+
+;; Conversions between float, double and long double.
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float_extend:DF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fstod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "extendsftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fstoq %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "extenddftf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float_extend:TF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fdtoq %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fdtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float_truncate:SF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "trunctfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float_truncate:DF
+ (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtod %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Conversion between fixed point and floating point.
+
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fitos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float:DF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fitod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatsitf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:SI 1 "register_operand" "f")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fitoq %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Now the same for 64 bit sources.
+
+(define_insn "floatdisf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (float:SF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "fxtos %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatdidf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (float:DF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU"
+ "fxtod %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "floatditf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (float:TF (match_operand:DI 1 "register_operand" "e")))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fxtoq %1,%0"
+ [(set_attr "type" "fp")])
+
+;; Convert a float to an actual integer.
+;; Truncation is performed as part of the conversion.
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+ "TARGET_FPU"
+ "fstoi %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_FPU"
+ "fdtoi %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_trunctfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=f")
+ (fix:SI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtoi %1,%0"
+ [(set_attr "type" "fp")])
+
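+;; E.g. the C cast (int) f truncates toward zero, which is exactly what
+;; fstoi/fdtoi/fqtoi implement; the nested (fix:SI (fix:SF ...)) is how the
+;; RTL spells out that truncating rounding mode.  Note the result lands in
+;; a float register and has to be moved to an integer register separately.
+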
+;; Now the same, for V9 targets
+
+(define_insn "fix_truncsfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:SF (match_operand:SF 1 "register_operand" "f"))))]
+ "TARGET_V9 && TARGET_FPU"
+ "fstox %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_truncdfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:DF (match_operand:DF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU"
+ "fdtox %1,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "fix_trunctfdi2"
+ [(set (match_operand:DI 0 "register_operand" "=e")
+ (fix:DI (fix:TF (match_operand:TF 1 "register_operand" "e"))))]
+ "TARGET_V9 && TARGET_FPU && TARGET_HARD_QUAD"
+ "fqtox %1,%0"
+ [(set_attr "type" "fp")])
+
+;;- arithmetic instructions
+
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_PLUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*adddi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
+ output_asm_insn (\"add %H1,%2,%H0\", xoperands);
+ else
+ output_asm_insn (\"addcc %L1,%3,%L0\;addx %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"addcc %L1,%L2,%L0\;addx %H1,%H2,%H0\";
+}"
+ [(set_attr "length" "2")])
+
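+;; For reference, a C-level double-word add such as
+;;   long long f (long long a, long long b) { return a + b; }
+;; should come out of the pattern above (modulo register allocation) as the
+;; two-insn sequence "addcc %L1,%L2,%L0 ; addx %H1,%H2,%H0", with the carry
+;; between the word halves propagated through the integer condition codes.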
+
+;; Split DImode arithmetic
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (plus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (plus:SI (plus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "arith_double_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64 && reload_completed"
+ [(parallel [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_dup 4)
+ (match_dup 5))
+ (const_int 0)))
+ (set (match_dup 3)
+ (minus:SI (match_dup 4) (match_dup 5)))])
+ (set (match_dup 6)
+ (minus:SI (minus:SI (match_dup 7)
+ (match_dup 8))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ "
+{
+ operands[3] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_lowpart (SImode, operands[2]);
+ operands[6] = gen_highpart (SImode, operands[0]);
+ operands[7] = gen_highpart (SImode, operands[1]);
+ if (GET_CODE (operands[2]) == CONST_INT)
+ {
+ if (INTVAL (operands[2]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[2]);
+}")
+
+;; LTU here means "carry set"
+(define_insn "*addx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "addx %1,%2,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*subx"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (ltu:SI (reg:CC_NOOV 100) (const_int 0))))]
+ ""
+ "subx %1,%2,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "addcc %L2,%1,%L0\;addx %H2,0,%H0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*adddi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "add %1,%2,%0")
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (plus:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ add %1,%2,%0
+ fpadd32s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_insn "*cmp_cc_plus"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_operand:SI 0 "arith_operand" "%r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "addcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_plus"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (plus:DI (match_operand:DI 0 "arith_double_operand" "%r")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "addcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_plus_set"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (plus:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "addcc %1,%2,%0")
+
+(define_insn "*cmp_ccx_plus_set"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (plus:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_ARCH64"
+ "addcc %1,%2,%0")
+
+(define_expand "subdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_MINUS (DImode, operands[1],
+ operands[2])),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*subdi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ if (xoperands[3] == const0_rtx && xoperands[0] == xoperands[1])
+ output_asm_insn (\"sub %H1,%2,%H0\", xoperands);
+ else
+ output_asm_insn (\"subcc %L1,%3,%L0\;subx %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"subcc %L1,%L2,%L0\;subx %H1,%H2,%H0\";
+}"
+ [(set_attr "length" "2")])
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "subcc %L1,%2,%L0\;addx %H1,0,%H0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*subdi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "sub %1,%2,%0")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (minus:SI (match_operand:SI 1 "register_operand" "r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ sub %1,%2,%0
+ fpsub32s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_insn "*cmp_minus_cc"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))]
+ ""
+ "subcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_minus_ccx"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (minus:DI (match_operand:DI 0 "register_operand" "r")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "subcc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_minus_cc_set"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (minus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (minus:SI (match_dup 1) (match_dup 2)))]
+ ""
+ "subcc %1,%2,%0")
+
+(define_insn "*cmp_minus_ccx_set"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (minus:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (minus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_ARCH64"
+ "subcc %1,%2,%0")
+
+;; Integer Multiply/Divide.
+
+;; The 32 bit multiply/divide instructions are deprecated on v9 and shouldn't
+;; be used. We still use them in 32 bit v9 compilers.
+;; The 64 bit v9 compiler will (or at least should) widen the args and use muldi3.
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_HARD_MUL"
+ "smul %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_expand "muldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_muldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "*muldi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "mulx %1,%2,%0")
+
+;; V8plus wide multiply.
+(define_insn "muldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (mult:DI (match_operand:DI 1 "arith_double_operand" "%r,0")
+ (match_operand:DI 2 "arith_double_operand" "rHI,rHI")))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_V8PLUS"
+ "*
+{
+ if (sparc_check_64 (operands[1], insn) <= 0)
+ output_asm_insn (\"srl %L1,0,%L1\", operands);
+ if (which_alternative == 1)
+ output_asm_insn (\"sllx %H1,32,%H1\", operands);
+ if (sparc_check_64 (operands[2], insn) <= 0)
+ output_asm_insn (\"srl %L2,0,%L2\", operands);
+ if (which_alternative == 1)
+ return \"or %L1,%H1,%H1\;sllx %H2,32,%L1\;or %L2,%L1,%L1\;mulx %H1,%L1,%L0\;srlx %L0,32,%H0\";
+ else
+ return \"sllx %H1,32,%3\;sllx %H2,32,%4\;or %L1,%3,%3\;or %L2,%4,%4\;mulx %3,%4,%3\;srlx %3,32,%H0\;mov %3,%L0\";
+}"
+ [(set_attr "length" "9,8")])
+
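+;; E.g. a plain C long long multiply,
+;;   long long f (long long a, long long b) { return a * b; }
+;; is expected to reach this pattern in v8plus mode: the sllx/or pairs
+;; rebuild each 64 bit operand from its 32 bit halves in 64 bit capable
+;; (out/global) registers, a single mulx forms the product, and the
+;; trailing srlx/mov split it back into the %H0/%L0 register pair.
+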
+;; It is not known whether this will match.
+
+(define_insn "*cmp_mul_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mult:SI (match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (mult:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "TARGET_V8 || TARGET_SPARCLITE || TARGET_DEPRECATED_V8_INSNS"
+ "smulcc %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_expand "mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "arith_operand" ""))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_mulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ emit_insn (gen_const_mulsidi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_mulsidi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+;; V9 puts the 64 bit product in a 64 bit register. Only out or global
+;; registers can hold 64 bit values in the V8plus environment.
+(define_insn "mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%L0\;srlx %L0,32,%H0
+ smul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "const_mulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "small_int" "I,I")))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%L0\;srlx %L0,32,%H0
+ smul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "*mulsidi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
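+;; A widening multiply written in C as
+;;   long long f (int a, int b) { return (long long) a * b; }
+;; should normally land here on 32 bit hard-multiply targets: smul leaves
+;; the low word of the product in %L0 and the high word in the %y register,
+;; which "rd %y,%H0" then copies out.  Sparclet's smuld delivers the whole
+;; 64 bit product directly, hence the single-insn form.
+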
+;; Extra pattern, because sign_extend of a constant isn't valid.
+
+(define_insn "const_mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "small_int" "I")))]
+ "TARGET_HARD_MUL"
+ "*
+{
+ return TARGET_SPARCLET ? \"smuld %1,%2,%L0\" : \"smul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+(define_expand "smulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (sign_extend:DI (match_operand:SI 2 "arith_operand" "")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_smulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
+ emit_insn (gen_const_smulsi3_highpart (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_smulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
+}")
+
+(define_insn "smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+;; The combiner changes TRUNCATE in the previous pattern to SUBREG.
+(define_insn ""
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (subreg:SI
+ (lshiftrt:DI
+ (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))
+ 1))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_smulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand 2 "small_int" "i,i"))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ smul %1,%2,%0\;srlx %0,%3,%0
+ smul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*smulsi3_highpart_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (sign_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "smul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_smulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "register_operand" "r"))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "smul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
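+;; The highpart patterns above cover source along the lines of
+;;   int f (int a, int b) { return ((long long) a * b) >> 32; }
+;; where only the upper word of the product is wanted; on plain v8 hardware
+;; that is just "smul %1,%2,%g0 ; rd %y,%0", discarding the low word.
+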
+(define_expand "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" ""))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_umulsidi3_v8plus (operands[0], operands[1],
+ operands[2]));
+ DONE;
+ }
+ emit_insn (gen_const_umulsidi3 (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsidi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn "umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r"))))
+ (clobber (match_scratch:SI 3 "=X,&h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%L0\;srlx %L0,32,%H0
+ umul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_insn "*umulsidi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+;; Extra pattern, because zero_extend of a constant isn't valid.
+
+(define_insn "const_umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" "")))]
+ "TARGET_HARD_MUL32"
+ "*
+{
+ return TARGET_SPARCLET ? \"umuld %1,%2,%L0\" : \"umul %1,%2,%L0\;rd %%y,%H0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "sparclet")
+ (const_int 1) (const_int 2)))])
+
+(define_insn "const_umulsidi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=h,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" "")))
+ (clobber (match_scratch:SI 3 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%L0\;srlx %L0,32,%H0
+ umul %1,%2,%3\;srlx %3,32,%H0\;mov %3,%L0"
+ [(set_attr "length" "2,3")])
+
+(define_expand "umulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" ""))
+ (zero_extend:DI (match_operand:SI 2 "uns_arith_operand" "")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL"
+ "
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_const_umulsi3_highpart_v8plus (operands[0],
+ operands[1],
+ operands[2],
+ GEN_INT (32)));
+ DONE;
+ }
+ emit_insn (gen_const_umulsi3_highpart (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+ if (TARGET_V8PLUS)
+ {
+ emit_insn (gen_umulsi3_highpart_v8plus (operands[0], operands[1],
+ operands[2], GEN_INT (32)));
+ DONE;
+ }
+}")
+
+(define_insn "umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r,r")))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%0\;srlx %0,%3,%0
+ umul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_umulsi3_highpart_v8plus"
+ [(set (match_operand:SI 0 "register_operand" "=h,r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (match_operand:SI 2 "uns_small_int" ""))
+ (match_operand:SI 3 "const_int_operand" "i,i"))))
+ (clobber (match_scratch:SI 4 "=X,h"))]
+ "TARGET_V8PLUS"
+ "@
+ umul %1,%2,%0\;srlx %0,%3,%0
+ umul %1,%2,%4\;srlx %4,%3,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "*umulsi3_highpart_sp32"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "r")))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "umul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+(define_insn "const_umulsi3_highpart"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (truncate:SI
+ (lshiftrt:DI (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r"))
+ (match_operand:SI 2 "uns_small_int" ""))
+ (const_int 32))))]
+ "TARGET_HARD_MUL32"
+ "umul %1,%2,%%g0\;rd %%y,%0"
+ [(set_attr "length" "2")])
+
+;; The v8 architecture specifies that there must be 3 instructions between
+;; a y register write and a use of it for correct results.
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "move_operand" "rI,m")))
+ (clobber (match_scratch:SI 3 "=&r,&r"))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (which_alternative == 0)
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdiv %1,%2,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdiv %1,%2,%0\";
+ else
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;ld %2,%3\;sdiv %1,%3,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;ld %2,%3\;nop\;nop\;sdiv %1,%3,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 4) (const_int 7)))])
+
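+;; So for plain C division, int f (int a, int b) { return a / b; }, the
+;; expected v8 expansion is roughly
+;;   sra %o0,31,%o2 ; wr %g0,%o2,%y ; nop ; nop ; nop ; sdiv %o0,%o1,%o0
+;; (register names illustrative only): %y holds the sign word as the upper
+;; half of the 64 bit dividend, and the nops satisfy the three-insn %y
+;; hazard described above.
+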
+(define_insn "divdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (div:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "sdivx %1,%2,%0")
+
+;; It is not known whether this will match.
+
+(define_insn "*cmp_sdiv_cc_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (div:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC 100)
+ (compare:CC (div:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))
+ (clobber (match_scratch:SI 3 "=&r"))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (TARGET_V9)
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;sdivcc %1,%2,%0\";
+ else
+ return \"sra %1,31,%3\;wr %%g0,%3,%%y\;nop\;nop\;nop\;sdivcc %1,%2,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 3) (const_int 6)))])
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,&r,&r")
+ (udiv:SI (match_operand:SI 1 "reg_or_nonsymb_mem_operand" "r,r,m")
+ (match_operand:SI 2 "move_operand" "rI,m,r")))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ output_asm_insn (\"wr %%g0,%%g0,%%y\", operands);
+ switch (which_alternative)
+ {
+ default:
+ if (TARGET_V9)
+ return \"udiv %1,%2,%0\";
+ return \"nop\;nop\;nop\;udiv %1,%2,%0\";
+ case 1:
+ return \"ld %2,%0\;nop\;nop\;udiv %1,%0,%0\";
+ case 2:
+ return \"ld %1,%0\;nop\;nop\;udiv %0,%2,%0\";
+ }
+}"
+ [(set (attr "length")
+ (if_then_else (and (eq_attr "isa" "v9")
+ (eq_attr "alternative" "0"))
+ (const_int 2) (const_int 5)))])
+
+(define_insn "udivdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (udiv:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "udivx %1,%2,%0")
+
+;; It is not known whether this will match.
+
+(define_insn "*cmp_udiv_cc_set"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (udiv:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (set (reg:CC 100)
+ (compare:CC (udiv:SI (match_dup 1) (match_dup 2))
+ (const_int 0)))]
+ "TARGET_V8 || TARGET_DEPRECATED_V8_INSNS"
+ "*
+{
+ if (TARGET_V9)
+ return \"wr %%g0,%%g0,%%y\;udivcc %1,%2,%0\";
+ else
+ return \"wr %%g0,%%g0,%%y\;nop\;nop\;nop\;udivcc %1,%2,%0\";
+}"
+ [(set (attr "length")
+ (if_then_else (eq_attr "isa" "v9")
+ (const_int 2) (const_int 5)))])
+
+;; Sparclet multiply/accumulate insns.
+
+(define_insn "*smacsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (mult:SI (match_operand:SI 1 "register_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (match_operand:SI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "smac %1,%2,%0"
+ [(set_attr "type" "imul")])
+
+(define_insn "*smacdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (sign_extend:DI
+ (match_operand:SI 1 "register_operand" "%r"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "smacd %1,%2,%L0"
+ [(set_attr "type" "imul")])
+
+(define_insn "*umacdi"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "%r"))
+ (zero_extend:DI
+ (match_operand:SI 2 "register_operand" "r")))
+ (match_operand:DI 3 "register_operand" "0")))]
+ "TARGET_SPARCLET"
+ "umacd %1,%2,%L0"
+ [(set_attr "type" "imul")])
+
+;;- Boolean instructions
+;; We define DImode `and' so that, together with DImode `not', we can get
+;; DImode `andn'. Other combinations are possible.
+
+(define_expand "anddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*anddi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"fand %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"and %L1,%3,%L0\;and %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"and %1,%2,%0\;and %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*anddi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "and %1,%2,%0")
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ and %1,%2,%0
+ fands %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (and:SI (not:SI (match_dup 3)) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
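+;; A worked instance of the split above (constants illustrative): for
+;; val = 0x0003ffff the low 10 bits are all ones, so ~val = 0xfffc0000 has
+;; no low bits set and fits in a single sethi; "a & val" then becomes
+;;   sethi %hi(0xfffc0000),%tmp ; andn %a,%tmp,%dst
+;; instead of needing a sethi/or pair to build val itself.
+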
+;; Split DImode logical operations requiring two instructions.
+(define_split
+ [(set (match_operand:DI 0 "register_operand" "")
+ (match_operator:DI 1 "cc_arithop" ; AND, IOR, XOR
+ [(match_operand:DI 2 "register_operand" "")
+ (match_operand:DI 3 "arith_double_operand" "")]))]
+ "! TARGET_ARCH64 && reload_completed
+ && GET_CODE (operands[0]) == REG && REGNO (operands[0]) < 32"
+ [(set (match_dup 4) (match_op_dup:SI 1 [(match_dup 6) (match_dup 8)]))
+ (set (match_dup 5) (match_op_dup:SI 1 [(match_dup 7) (match_dup 9)]))]
+ "
+{
+ operands[4] = gen_highpart (SImode, operands[0]);
+ operands[5] = gen_lowpart (SImode, operands[0]);
+ operands[6] = gen_highpart (SImode, operands[2]);
+ operands[7] = gen_lowpart (SImode, operands[2]);
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ if (INTVAL (operands[3]) < 0)
+ operands[8] = constm1_rtx;
+ else
+ operands[8] = const0_rtx;
+ }
+ else
+ operands[8] = gen_highpart (SImode, operands[3]);
+ operands[9] = gen_lowpart (SImode, operands[3]);
+}")
+
+(define_insn "*and_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ andn %2,%1,%0\;andn %R2,%R1,%R0
+ fandnot1 %1,%2,%0"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*and_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (and:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "andn %2,%1,%0")
+
+(define_insn "*and_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (and:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
+ ""
+ "@
+ andn %2,%1,%0
+ fandnot1s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_expand "iordi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*iordi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"for %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"or %L1,%3,%L0\;or %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"or %1,%2,%0\;or %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*iordi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "or %1,%2,%0")
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (match_operand:SI 1 "arith_operand" "%r,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ or %1,%2,%0
+ fors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (ior:SI (not:SI (match_dup 3)) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+(define_insn "*or_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r,b"))
+ (match_operand:DI 2 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ orn %2,%1,%0\;orn %R2,%R1,%R0
+ fornot1 %1,%2,%0"
+ [(set_attr "length" "2,1")])
+
+(define_insn "*or_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ior:DI (not:DI (match_operand:DI 1 "register_operand" "r"))
+ (match_operand:DI 2 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "orn %2,%1,%0")
+
+(define_insn "*or_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (ior:SI (not:SI (match_operand:SI 1 "register_operand" "r,d"))
+ (match_operand:SI 2 "register_operand" "r,d")))]
+ ""
+ "@
+ orn %2,%1,%0
+ fornot1s %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_expand "xordi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "")
+ (match_operand:DI 2 "arith_double_operand" "")))]
+ ""
+ "")
+
+(define_insn "*xordi3_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%r,b")
+ (match_operand:DI 2 "arith_double_operand" "rHI,b")))]
+ "! TARGET_ARCH64"
+ "*
+{
+ rtx op2 = operands[2];
+
+ if (which_alternative == 1)
+ return \"fxor %1,%2,%0\";
+
+ if (GET_CODE (op2) == CONST_INT
+ || GET_CODE (op2) == CONST_DOUBLE)
+ {
+ rtx xoperands[4];
+ xoperands[0] = operands[0];
+ xoperands[1] = operands[1];
+ if (WORDS_BIG_ENDIAN)
+ split_double (op2, &xoperands[2], &xoperands[3]);
+ else
+ split_double (op2, &xoperands[3], &xoperands[2]);
+ output_asm_insn (\"xor %L1,%3,%L0\;xor %H1,%2,%H0\", xoperands);
+ return \"\";
+ }
+ return \"xor %1,%2,%0\;xor %R1,%R2,%R0\";
+}"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
+
+(define_insn "*xordi3_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (xor:DI (match_operand:DI 1 "arith_double_operand" "%rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "xor %r1,%2,%0")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (xor:SI (match_operand:SI 1 "arith_operand" "%rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d")))]
+ ""
+ "@
+ xor %r1,%2,%0
+ fxors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" "")))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (not:SI (xor:SI (match_dup 3) (match_dup 1))))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (xor:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "" ""))))
+ (clobber (match_operand:SI 3 "register_operand" ""))]
+ "GET_CODE (operands[2]) == CONST_INT
+ && !SMALL_INT32 (operands[2])
+ && (INTVAL (operands[2]) & 0x3ff) == 0x3ff"
+ [(set (match_dup 3) (match_dup 4))
+ (set (match_dup 0) (xor:SI (match_dup 3) (match_dup 1)))]
+ "
+{
+ operands[4] = GEN_INT (~INTVAL (operands[2]) & 0xffffffff);
+}")
+
+;; xnor patterns. Note that (a ^ ~b) == (~a ^ b) == ~(a ^ b).
+;; Combine now canonicalizes to the rightmost expression.
+(define_insn "*xor_not_di_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (xor:DI (match_operand:DI 1 "register_operand" "r,b")
+ (match_operand:DI 2 "register_operand" "r,b"))))]
+ "! TARGET_ARCH64"
+ "@
+ xnor %1,%2,%0\;xnor %R1,%R2,%R0
+ fxnor %1,%2,%0"
+ [(set_attr "length" "2,1")
+ (set_attr "type" "ialu,fp")])
+
+(define_insn "*xor_not_di_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI"))))]
+ "TARGET_ARCH64"
+ "xnor %r1,%2,%0"
+ [(set_attr "type" "ialu")])
+
+(define_insn "*xor_not_si"
+ [(set (match_operand:SI 0 "register_operand" "=r,d")
+ (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,d")
+ (match_operand:SI 2 "arith_operand" "rI,d"))))]
+ ""
+ "@
+ xnor %r1,%2,%0
+ fxnors %1,%2,%0"
+ [(set_attr "type" "ialu,fp")])
+
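+;; In C terms this means an expression like ~(a ^ b) (or equivalently
+;; a ^ ~b) in
+;;   int f (int a, int b) { return ~(a ^ b); }
+;; collapses to a single xnor rather than an xor followed by a separate not.
+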
+;; These correspond to the above in the case where we also (or only)
+;; want to set the condition code.
+
+(define_insn "*cmp_cc_arith_op"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 2 "cc_arithop"
+ [(match_operand:SI 0 "arith_operand" "%r")
+ (match_operand:SI 1 "arith_operand" "rI")])
+ (const_int 0)))]
+ ""
+ "%A2cc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_arith_op"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 2 "cc_arithop"
+ [(match_operand:DI 0 "arith_double_operand" "%r")
+ (match_operand:DI 1 "arith_double_operand" "rHI")])
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "%A2cc %0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_arith_op_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 3 "cc_arithop"
+ [(match_operand:SI 1 "arith_operand" "%r")
+ (match_operand:SI 2 "arith_operand" "rI")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (match_dup 3))]
+ ""
+ "%A3cc %1,%2,%0")
+
+(define_insn "*cmp_ccx_arith_op_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 3 "cc_arithop"
+ [(match_operand:DI 1 "arith_double_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 3))]
+ "TARGET_ARCH64"
+ "%A3cc %1,%2,%0")
+
+(define_insn "*cmp_cc_xor_not"
+ [(set (reg:CC 100)
+ (compare:CC
+ (not:SI (xor:SI (match_operand:SI 0 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 1 "arith_operand" "rI")))
+ (const_int 0)))]
+ ""
+ "xnorcc %r0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_xor_not"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (not:DI (xor:DI (match_operand:DI 0 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 1 "arith_double_operand" "rHI")))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "xnorcc %r0,%1,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_xor_not_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (not:SI (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (xor:SI (match_dup 1) (match_dup 2))))]
+ ""
+ "xnorcc %r1,%2,%0")
+
+(define_insn "*cmp_ccx_xor_not_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (not:DI (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (xor:DI (match_dup 1) (match_dup 2))))]
+ "TARGET_ARCH64"
+ "xnorcc %r1,%2,%0")
+
+(define_insn "*cmp_cc_arith_op_not"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 2 "cc_arithopn"
+ [(not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (match_operand:SI 1 "reg_or_0_operand" "rJ")])
+ (const_int 0)))]
+ ""
+ "%B2cc %r1,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_arith_op_not"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 2 "cc_arithopn"
+ [(not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (match_operand:DI 1 "reg_or_0_operand" "rJ")])
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "%B2cc %r1,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_arith_op_not_set"
+ [(set (reg:CC 100)
+ (compare:CC
+ (match_operator:SI 3 "cc_arithopn"
+ [(not:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (match_operand:SI 2 "reg_or_0_operand" "rJ")])
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (match_dup 3))]
+ ""
+ "%B3cc %r2,%1,%0")
+
+(define_insn "*cmp_ccx_arith_op_not_set"
+ [(set (reg:CCX 100)
+ (compare:CCX
+ (match_operator:DI 3 "cc_arithopn"
+ [(not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (match_operand:DI 2 "reg_or_0_operand" "rJ")])
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (match_dup 3))]
+ "TARGET_ARCH64"
+ "%B3cc %r2,%1,%0")
+
+;; We cannot use the "neg" pseudo insn because the Sun assembler
+;; does not know how to make it work for constants.
+
+(define_expand "negdi2"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ ""
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operand0,
+ gen_rtx_NEG (DImode, operand1)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode, SPARC_ICC_REG)))));
+ DONE;
+ }
+}")
+
+(define_insn "*negdi2_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))
+ (clobber (reg:CC 100))]
+ "! TARGET_ARCH64"
+ "*
+{
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"subcc %%g0,%L1,%L0\;subx %%g0,%H1,%H0\";
+}"
+ [(set_attr "type" "unary")
+ ;; ??? This is wrong for TARGET_LIVE_G0 but it's not critical.
+ (set_attr "length" "2")])
+
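+;; Double-word negation is a subtract from zero with borrow: "subcc
+;; %g0,%L1,%L0" negates the low word and records the borrow in icc, and
+;; "subx %g0,%H1,%H0" folds it into the high word.  E.g. for the value 1
+;; (H1 = 0, L1 = 1) this gives L0 = 0xffffffff with a borrow and
+;; H0 = 0 - 0 - 1 = 0xffffffff, i.e. -1 as expected.
+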
+(define_insn "*negdi2_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_operand:DI 1 "register_operand" "r")))]
+ "TARGET_ARCH64"
+ "sub %%g0,%1,%0"
+ [(set_attr "type" "unary")
+ (set_attr "length" "1")])
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_operand:SI 1 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (TARGET_LIVE_G0)
+ return \"and %%g0,0,%%g0\;sub %%g0,%1,%0\";
+ return \"sub %%g0,%1,%0\";
+}"
+ [(set_attr "type" "unary")
+ (set (attr "length")
+ (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1)))])
+
+(define_insn "*cmp_cc_neg"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (neg:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_neg"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (neg:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_neg"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (neg:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (neg:SI (match_dup 1)))]
+ "! TARGET_LIVE_G0"
+ "subcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*cmp_ccx_set_neg"
+ [(set (reg:CCX_NOOV 100)
+ (compare:CCX_NOOV (neg:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (neg:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "subcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+;; We cannot use the "not" pseudo insn because the Sun assembler
+;; does not know how to make it work for constants.
+(define_expand "one_cmpldi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (not:DI (match_operand:DI 1 "register_operand" "")))]
+ ""
+ "")
+
+(define_insn "*one_cmpldi2_sp32"
+ [(set (match_operand:DI 0 "register_operand" "=r,b")
+ (not:DI (match_operand:DI 1 "register_operand" "r,b")))]
+ "! TARGET_ARCH64"
+ "@
+ xnor %1,0,%0\;xnor %R1,0,%R0
+ fnot1 %1,%0"
+ [(set_attr "type" "unary,fp")
+ (set_attr "length" "2,1")])
+
+(define_insn "*one_cmpldi2_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_operand:DI 1 "arith_double_operand" "rHI")))]
+ "TARGET_ARCH64"
+ "xnor %1,0,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,d")
+ (not:SI (match_operand:SI 1 "arith_operand" "r,I,d")))]
+ ""
+ "*
+{
+ if (which_alternative == 0)
+ return \"xnor %1,0,%0\";
+ if (which_alternative == 2)
+ return \"fnot1s %1,%0\";
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"xnor %%g0,%1,%0\";
+}"
+ [(set_attr "type" "unary,unary,fp")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "live_g0" "yes") (const_int 2) (const_int 1))
+ (const_int 1)])])
+
+(define_insn "*cmp_cc_not"
+ [(set (reg:CC 100)
+ (compare:CC (not:SI (match_operand:SI 0 "arith_operand" "rI"))
+ (const_int 0)))]
+ "! TARGET_LIVE_G0"
+ "xnorcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_ccx_not"
+ [(set (reg:CCX 100)
+ (compare:CCX (not:DI (match_operand:DI 0 "arith_double_operand" "rHI"))
+ (const_int 0)))]
+ "TARGET_ARCH64"
+ "xnorcc %%g0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_not"
+ [(set (reg:CC 100)
+ (compare:CC (not:SI (match_operand:SI 1 "arith_operand" "rI"))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_dup 1)))]
+ "! TARGET_LIVE_G0"
+ "xnorcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+(define_insn "*cmp_ccx_set_not"
+ [(set (reg:CCX 100)
+ (compare:CCX (not:DI (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (const_int 0)))
+ (set (match_operand:DI 0 "register_operand" "=r")
+ (not:DI (match_dup 1)))]
+ "TARGET_ARCH64"
+ "xnorcc %%g0,%1,%0"
+ [(set_attr "type" "unary")])
+
+;; Floating point arithmetic instructions.
+
+(define_insn "addtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (plus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "faddq %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (plus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "faddd %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (plus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fadds %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (minus:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsubq %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (minus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fsubd %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (minus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsubs %1,%2,%0"
+ [(set_attr "type" "fp")])
+
+(define_insn "multf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fmulq %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fmuld %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fmuls %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*muldf3_extend"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "f"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "f"))))]
+ "(TARGET_V8 || TARGET_V9) && TARGET_FPU"
+ "fsmuld %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*multf3_extend"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (mult:TF (float_extend:TF (match_operand:DF 1 "register_operand" "e"))
+ (float_extend:TF (match_operand:DF 2 "register_operand" "e"))))]
+ "(TARGET_V8 || TARGET_V9) && TARGET_FPU && TARGET_HARD_QUAD"
+ "fdmulq %1,%2,%0"
+ [(set_attr "type" "fpmul")])
+
+;; We don't have timing data for the quad-precision divide.
+(define_insn "divtf3"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (div:TF (match_operand:TF 1 "register_operand" "e")
+ (match_operand:TF 2 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fdivq %1,%2,%0"
+ [(set_attr "type" "fpdivd")])
+
+(define_insn "divdf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (div:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fdivd %1,%2,%0"
+ [(set_attr "type" "fpdivd")])
+
+(define_insn "divsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (div:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fdivs %1,%2,%0"
+ [(set_attr "type" "fpdivs")])
+
+(define_insn "negtf2"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (neg:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU"
+ "*
+{
+ /* v9: can't use fnegs, won't work with upper regs. */
+ if (which_alternative == 0)
+ return TARGET_V9 ? \"fnegd %0,%0\" : \"fnegs %0,%0\";
+ else
+ return TARGET_V9 ? \"fnegd %1,%0\;fmovd %S1,%S0\"
+ : \"fnegs %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+
+(define_insn "negdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (neg:DF (match_operand:DF 1 "register_operand" "0,e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fnegd %1,%0\";
+ else if (which_alternative == 0)
+ return \"fnegs %0,%0\";
+ else
+ return \"fnegs %1,%0\;fmovs %R1,%R0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+
+(define_insn "negsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fnegs %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "abstf2"
+ [(set (match_operand:TF 0 "register_operand" "=e,e")
+ (abs:TF (match_operand:TF 1 "register_operand" "0,e")))]
+ ; We don't use quad float insns here so we don't need TARGET_HARD_QUAD.
+ "TARGET_FPU"
+ "*
+{
+ /* v9: can't use fabss, won't work with upper regs. */
+ if (which_alternative == 0)
+ return TARGET_V9 ? \"fabsd %0,%0\" : \"fabss %0,%0\";
+ else
+ return TARGET_V9 ? \"fabsd %1,%0\;fmovd %S1,%S0\"
+ : \"fabss %1,%0\;fmovs %R1,%R0\;fmovs %S1,%S0\;fmovs %T1,%T0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 2) (const_int 4))])])
+
+(define_insn "absdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e,e")
+ (abs:DF (match_operand:DF 1 "register_operand" "0,e")))]
+ "TARGET_FPU"
+ "*
+{
+ if (TARGET_V9)
+ return \"fabsd %1,%0\";
+ else if (which_alternative == 0)
+ return \"fabss %0,%0\";
+ else
+ return \"fabss %1,%0\;fmovs %R1,%R0\";
+}"
+ [(set_attr "type" "fpmove")
+ (set_attr_alternative "length"
+ [(const_int 1)
+ (if_then_else (eq_attr "isa" "v9") (const_int 1) (const_int 2))])])
+
+(define_insn "abssf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (abs:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fabss %1,%0"
+ [(set_attr "type" "fpmove")])
+
+(define_insn "sqrttf2"
+ [(set (match_operand:TF 0 "register_operand" "=e")
+ (sqrt:TF (match_operand:TF 1 "register_operand" "e")))]
+ "TARGET_FPU && TARGET_HARD_QUAD"
+ "fsqrtq %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+(define_insn "sqrtdf2"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (sqrt:DF (match_operand:DF 1 "register_operand" "e")))]
+ "TARGET_FPU"
+ "fsqrtd %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+(define_insn "sqrtsf2"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (sqrt:SF (match_operand:SF 1 "register_operand" "f")))]
+ "TARGET_FPU"
+ "fsqrts %1,%0"
+ [(set_attr "type" "fpsqrt")])
+
+;;- arithmetic shift instructions
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"sll %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
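+;; The masking above matches the hardware: sll/srl/sra only use the low
+;; five bits of the shift count, so e.g. a (formally undefined in C) shift
+;; by 33 is emitted as "sll %1,1,%0".
+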
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+{
+ if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_ashldi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+}")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashift:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"sllx %1,%2,%0\";
+}")
+
+(define_insn "ashldi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashift:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"sllx\");"
+ [(set_attr "length" "5,5,6")])
+
+;; Optimize (1LL<<x)-1
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=h")
+ (plus:DI (ashift:DI (const_int 1)
+ (match_operand:SI 2 "arith_operand" "rI"))
+ (const_int -1)))]
+ "TARGET_V8PLUS"
+ "*
+{
+ if (GET_CODE (operands[2]) == REG && REGNO (operands[2]) == REGNO (operands[0]))
+ return \"mov 1,%L0\;sllx %L0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+ return \"mov 1,%H0\;sllx %H0,%2,%L0\;sub %L0,1,%L0\;srlx %L0,32,%H0\";
+}"
+ [(set_attr "length" "4")])
+
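+;; That is, C source like
+;;   long long f (int x) { return (1LL << x) - 1; }
+;; should match here under v8plus: mov/sllx build 1 << x in a 64 bit
+;; capable register, sub 1 turns it into the mask, and srlx by 32 peels
+;; off the high word for the %H0/%L0 pair.
+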
+(define_insn "*cmp_cc_ashift_1"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (ashift:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))]
+ ""
+ "addcc %0,%0,%%g0"
+ [(set_attr "type" "compare")])
+
+(define_insn "*cmp_cc_set_ashift_1"
+ [(set (reg:CC_NOOV 100)
+ (compare:CC_NOOV (ashift:SI (match_operand:SI 1 "register_operand" "r")
+ (const_int 1))
+ (const_int 0)))
+ (set (match_operand:SI 0 "register_operand" "=r")
+ (ashift:SI (match_dup 1) (const_int 1)))]
+ ""
+ "addcc %1,%1,%0")
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"sra %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
+(define_expand "ashrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL; /* prefer generic code in this case */
+ emit_insn (gen_ashrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (ashiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"srax %1,%2,%0\";
+}")
+
+(define_insn "ashrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (ashiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srax\");"
+ [(set_attr "length" "5,5,6")])
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ ""
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 31)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x1f);
+
+ return \"srl %1,%2,%0\";
+}"
+ [(set_attr "type" "shift")])
+
+(define_expand "lshrdi3"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64 || TARGET_V8PLUS"
+ "
+if (! TARGET_ARCH64)
+ {
+ if (GET_CODE (operands[2]) == CONST_INT)
+ FAIL;
+ emit_insn (gen_lshrdi3_v8plus (operands[0], operands[1], operands[2]));
+ DONE;
+ }")
+
+(define_insn ""
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (lshiftrt:DI (match_operand:DI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))]
+ "TARGET_ARCH64"
+ "*
+{
+ if (GET_CODE (operands[2]) == CONST_INT
+ && (unsigned HOST_WIDE_INT) INTVAL (operands[2]) > 63)
+ operands[2] = GEN_INT (INTVAL (operands[2]) & 0x3f);
+
+ return \"srlx %1,%2,%0\";
+}")
+
+(define_insn "lshrdi3_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=&h,&h,r")
+ (lshiftrt:DI (match_operand:DI 1 "arith_operand" "rI,0,rI")
+ (match_operand:SI 2 "arith_operand" "rI,rI,rI")))
+ (clobber (match_scratch:SI 3 "=X,X,&h"))]
+ "TARGET_V8PLUS"
+ "*return sparc_v8plus_shift (operands, insn, \"srlx\");"
+ [(set_attr "length" "5,5,6")])
+
+;; Unconditional and other jump instructions
+;; On the Sparc, by setting the annul bit on an unconditional branch, the
+;; following insn is never executed. This saves us a nop. Dbx does not
+;; handle such branches though, so we only use them when optimizing.
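+;; For instance, the annulled unconditional branch
+;;	b,a	foo
+;; stands in for the two-instruction sequence
+;;	b	foo
+;;	 nop
+;; since the instruction in the annulled delay slot is never executed.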
+(define_insn "jump"
+ [(set (pc) (label_ref (match_operand 0 "" "")))]
+ ""
+ "*
+{
+ /* Some implementations (e.g. TurboSparc) are reported to have problems
+ with
+ foo: b,a foo
+ i.e. an empty loop with the annul bit set. The workaround is to use
+ foo: b foo; nop
+ instead. */
+
+ if (flag_delayed_branch
+ && (insn_addresses[INSN_UID (operands[0])]
+ == insn_addresses[INSN_UID (insn)]))
+ return \"b %l0%#\";
+ else
+ return \"b%* %l0%(\";
+}"
+ [(set_attr "type" "uncond_branch")])
+
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand 0 "register_operand" "r"))
+ (use (label_ref (match_operand 1 "" "")))])]
+ ""
+ "
+{
+ if (GET_MODE (operands[0]) != Pmode)
+ abort ();
+
+ /* In pic mode, our address differences are against the base of the
+ table. Add that base value back in; CSE ought to be able to combine
+ the two address loads. */
+ if (flag_pic)
+ {
+ rtx tmp, tmp2;
+ tmp = gen_rtx_LABEL_REF (Pmode, operands[1]);
+ tmp2 = operands[0];
+ tmp = gen_rtx_PLUS (Pmode, tmp2, tmp);
+ operands[0] = memory_address (Pmode, tmp);
+ }
+}")
+
+(define_insn "*tablejump_sp32"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "! TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+(define_insn "*tablejump_sp64"
+ [(set (pc) (match_operand:DI 0 "address_operand" "p"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+;; This pattern recognizes the "instruction" that appears in
+;; a function call that wants a structure value,
+;; to inform the called function if compiled with Sun CC.
+;(define_insn "*unimp_insn"
+; [(match_operand:SI 0 "immediate_operand" "")]
+; "GET_CODE (operands[0]) == CONST_INT && INTVAL (operands[0]) > 0"
+; "unimp %0"
+; [(set_attr "type" "marker")])
+
+;;- jump to subroutine
+(define_expand "call"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(call (match_operand 0 "call_operand" "")
+ (match_operand 3 "" "i"))]
+ ;; operands[2] is next_arg_register
+ ;; operands[3] is struct_value_size_rtx.
+ ""
+ "
+{
+ rtx fn_rtx, nregs_rtx;
+
+ if (GET_MODE (operands[0]) != FUNCTION_MODE)
+ abort ();
+
+ if (GET_CODE (XEXP (operands[0], 0)) == LABEL_REF)
+ {
+ /* This is really a PIC sequence. We want to represent
+ it as a funny jump so its delay slots can be filled.
+
+	 ??? But if this really *is* a CALL, won't it clobber the
+	 call-clobbered registers?  We lose this if it is a JUMP_INSN.
+	 Why can't we have delay slots filled if it were a CALL?  */
+
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
+ emit_jump_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (3,
+ gen_rtx_SET (VOIDmode, pc_rtx,
+ XEXP (operands[0], 0)),
+ GEN_INT (INTVAL (operands[3]) & 0xfff),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ else
+ emit_jump_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, pc_rtx,
+ XEXP (operands[0], 0)),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ goto finish_call;
+ }
+
+ fn_rtx = operands[0];
+
+  /* Count the number of parameter registers being used by this call.
+     If that argument is NULL, it means we are using them all, which
+     means 6 on the sparc.  */
+#if 0
+ if (operands[2])
+ nregs_rtx = GEN_INT (REGNO (operands[2]) - 8);
+ else
+ nregs_rtx = GEN_INT (6);
+#else
+ nregs_rtx = const0_rtx;
+#endif
+
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) != 0)
+ emit_call_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (3, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ GEN_INT (INTVAL (operands[3]) & 0xfff),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+ else
+ emit_call_insn
+ (gen_rtx_PARALLEL (VOIDmode,
+ gen_rtvec (2, gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx),
+ gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (Pmode, 15)))));
+
+ finish_call:
+#if 0
+ /* If this call wants a structure value,
+ emit an unimp insn to let the called function know about this. */
+ if (! TARGET_ARCH64 && INTVAL (operands[3]) > 0)
+ {
+ rtx insn = emit_insn (operands[3]);
+ SCHED_GROUP_P (insn) = 1;
+ }
+#endif
+
+ DONE;
+}")
+
+;; We can't use the same pattern for these two insns, because then registers
+;; in the address may not be properly reloaded.
+
+(define_insn "*call_address_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_symbolic_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_address_sp64"
+ [(call (mem:SI (match_operand:DI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_symbolic_sp64"
+ [(call (mem:SI (match_operand:DI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "TARGET_PTR64"
+ "call %a0,%1%#"
+ [(set_attr "type" "call")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_address_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that wants a structure value.
+;; There is no such critter for v9 (??? we may need one anyway).
+(define_insn "*call_symbolic_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) >= 0"
+ "call %a0,%1\;nop\;unimp %2"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that may want a structure value. This is used for
+;; untyped_calls.
+(define_insn "*call_address_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "address_operand" "p"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call %a0,%1\;nop\;nop"
+ [(set_attr "type" "call_no_delay_slot")])
+
+;; This is a call that may want a structure value.
+(define_insn "*call_symbolic_untyped_struct_value_sp32"
+ [(call (mem:SI (match_operand:SI 0 "symbolic_operand" "s"))
+ (match_operand 1 "" ""))
+ (match_operand 2 "immediate_operand" "")
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 1 for most machines.
+ "! TARGET_ARCH64 && GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) < 0"
+ "call %a0,%1\;nop\;nop"
+ [(set_attr "type" "call_no_delay_slot")])
+
+(define_expand "call_value"
+ ;; Note that this expression is not used for generating RTL.
+ ;; All the RTL is generated explicitly below.
+ [(set (match_operand 0 "register_operand" "=rf")
+ (call (match_operand:SI 1 "" "")
+ (match_operand 4 "" "")))]
+ ;; operand 2 is stack_size_rtx
+ ;; operand 3 is next_arg_register
+ ""
+ "
+{
+ rtx fn_rtx, nregs_rtx;
+ rtvec vec;
+
+ if (GET_MODE (operands[1]) != FUNCTION_MODE)
+ abort ();
+
+ fn_rtx = operands[1];
+
+#if 0
+ if (operands[3])
+ nregs_rtx = GEN_INT (REGNO (operands[3]) - 8);
+ else
+ nregs_rtx = GEN_INT (6);
+#else
+ nregs_rtx = const0_rtx;
+#endif
+
+ vec = gen_rtvec (2,
+ gen_rtx_SET (VOIDmode, operands[0],
+ gen_rtx_CALL (VOIDmode, fn_rtx, nregs_rtx)),
+ gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 15)));
+
+ emit_call_insn (gen_rtx_PARALLEL (VOIDmode, vec));
+
+ DONE;
+}")
+
+(define_insn "*call_value_address_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "address_operand" "p"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "! TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_symbolic_sp32"
+ [(set (match_operand 0 "" "=rf")
+ (call (mem:SI (match_operand:SI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "! TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_address_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "address_operand" "p"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_insn "*call_value_symbolic_sp64"
+ [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "symbolic_operand" "s"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))]
+ ;;- Do not use operand 2 for most machines.
+ "TARGET_PTR64"
+ "call %a1,%2%#"
+ [(set_attr "type" "call")])
+
+(define_expand "untyped_call"
+ [(parallel [(call (match_operand 0 "" "")
+ (const_int 0))
+ (match_operand 1 "" "")
+ (match_operand 2 "" "")])]
+ ""
+ "
+{
+ int i;
+
+  /* Pass constm1 to indicate that the called function may expect a
+     structure value, but we don't know what size it is.  */
+ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, constm1_rtx));
+
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
+
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+}")
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] 0)]
+ ""
+ "")
+
+;; Prepare to return any type including a structure value.
+
+(define_expand "untyped_return"
+ [(match_operand:BLK 0 "memory_operand" "")
+ (match_operand 1 "" "")]
+ ""
+ "
+{
+ rtx valreg1 = gen_rtx_REG (DImode, 24);
+ rtx valreg2 = gen_rtx_REG (TARGET_ARCH64 ? TFmode : DFmode, 32);
+ rtx result = operands[0];
+
+ if (! TARGET_ARCH64)
+ {
+ rtx rtnreg = gen_rtx_REG (SImode, (leaf_function ? 15 : 31));
+ rtx value = gen_reg_rtx (SImode);
+
+      /* Fetch the instruction at the return address and see if it is an
+	 unimp instruction (the most significant 10 bits will be zero).  If
+	 so, update the return address to skip the unimp instruction.  */
+ emit_move_insn (value,
+ gen_rtx_MEM (SImode, plus_constant (rtnreg, 8)));
+ emit_insn (gen_lshrsi3 (value, value, GEN_INT (22)));
+ emit_insn (gen_update_return (rtnreg, value));
+ }
+
+ /* Reload the function value registers. */
+ emit_move_insn (valreg1, change_address (result, DImode, XEXP (result, 0)));
+ emit_move_insn (valreg2,
+ change_address (result, TARGET_ARCH64 ? TFmode : DFmode,
+ plus_constant (XEXP (result, 0), 8)));
+
+ /* Put USE insns before the return. */
+ emit_insn (gen_rtx_USE (VOIDmode, valreg1));
+ emit_insn (gen_rtx_USE (VOIDmode, valreg2));
+
+ /* Construct the return. */
+ expand_null_return ();
+
+ DONE;
+}")
+
+;; This is a bit of a hack. We're incrementing a fixed register (%i7),
+;; and parts of the compiler don't want to believe that the add is needed.
+
+(define_insn "update_return"
+ [(unspec:SI [(match_operand:SI 0 "register_operand" "r")
+ (match_operand:SI 1 "register_operand" "r")] 1)]
+ "! TARGET_ARCH64"
+ "cmp %1,0\;be,a .+8\;add %0,4,%0"
+ [(set_attr "type" "multi")])
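+
+;; A reading of the template above (assuming the usual SPARC annul
+;; semantics): %1 holds the caller's next instruction word shifted right
+;; 22 bits, per untyped_return.  When it is zero that word is an unimp,
+;; the taken be,a executes the delay-slot add, and the return address %0
+;; is bumped past the unimp by 4.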
+
+(define_insn "return"
+ [(return)
+ (use (reg:SI 31))]
+ "! TARGET_EPILOGUE"
+ "* return output_return (operands);"
+ [(set_attr "type" "return")])
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (parallel [(return)
+ (use (reg:SI 31))])]
+ "sparc_return_peephole_ok (operands[0], operands[1])"
+ "return %%i7+8\;mov %Y1,%Y0")
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+(define_expand "indirect_jump"
+ [(set (pc) (match_operand 0 "address_operand" "p"))]
+ ""
+ "")
+
+(define_insn "*branch_sp32"
+ [(set (pc) (match_operand:SI 0 "address_operand" "p"))]
+ "! TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+(define_insn "*branch_sp64"
+ [(set (pc) (match_operand:DI 0 "address_operand" "p"))]
+ "TARGET_PTR64"
+ "jmp %a0%#"
+ [(set_attr "type" "uncond_branch")])
+
+;; ??? Doesn't work with -mflat.
+(define_expand "nonlocal_goto"
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" "")
+ (match_operand:SI 2 "general_operand" "")
+ (match_operand:SI 3 "" "")]
+ ""
+ "
+{
+ rtx chain = operands[0];
+ rtx fp = operands[1];
+ rtx stack = operands[2];
+ rtx lab = operands[3];
+ rtx labreg;
+
+ /* Trap instruction to flush all the register windows. */
+ emit_insn (gen_flush_register_windows ());
+
+ /* Load the fp value for the containing fn into %fp. This is needed
+ because STACK refers to %fp. Note that virtual register instantiation
+ fails if the virtual %fp isn't set from a register. */
+ if (GET_CODE (fp) != REG)
+ fp = force_reg (Pmode, fp);
+ emit_move_insn (virtual_stack_vars_rtx, fp);
+
+ /* Find the containing function's current nonlocal goto handler,
+ which will do any cleanups and then jump to the label. */
+ labreg = gen_rtx_REG (Pmode, 8);
+ emit_move_insn (labreg, lab);
+
+ /* Restore %fp from stack pointer value for containing function.
+ The restore insn that follows will move this to %sp,
+ and reload the appropriate value into %fp. */
+ emit_move_insn (frame_pointer_rtx, stack);
+
+ /* USE of frame_pointer_rtx added for consistency; not clear if
+ really needed. */
+ /*emit_insn (gen_rtx_USE (VOIDmode, frame_pointer_rtx));*/
+ emit_insn (gen_rtx_USE (VOIDmode, stack_pointer_rtx));
+ /* Return, restoring reg window and jumping to goto handler. */
+ if (TARGET_V9 && GET_CODE (chain) == CONST_INT
+ && ! (INTVAL (chain) & ~(HOST_WIDE_INT)0xffffffff))
+ {
+ emit_insn (gen_goto_handler_and_restore_v9 (labreg, static_chain_rtx,
+ chain));
+ emit_barrier ();
+ DONE;
+ }
+ /* Put in the static chain register the nonlocal label address. */
+ emit_move_insn (static_chain_rtx, chain);
+ emit_insn (gen_rtx_USE (VOIDmode, static_chain_rtx));
+ emit_insn (gen_goto_handler_and_restore (labreg));
+ emit_barrier ();
+ DONE;
+}")
+
+;; Special trap insn to flush register windows.
+(define_insn "flush_register_windows"
+ [(unspec_volatile [(const_int 0)] 1)]
+ ""
+ "* return TARGET_V9 ? \"flushw\" : \"ta 3\";"
+ [(set_attr "type" "misc")])
+
+(define_insn "goto_handler_and_restore"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r")] 2)]
+ ""
+ "jmp %0+0\;restore"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2")])
+
+(define_insn "goto_handler_and_restore_v9"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "=r,r")
+ (match_operand:SI 1 "register_operand" "=r,r")
+ (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+ "TARGET_V9 && ! TARGET_ARCH64"
+ "@
+ return %0+0\;mov %2,%Y1
+ sethi %%hi(%2),%1\;return %0+0\;or %Y1,%%lo(%2),%Y1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2,3")])
+
+(define_insn "*goto_handler_and_restore_v9_sp64"
+ [(unspec_volatile [(match_operand:DI 0 "register_operand" "=r,r")
+ (match_operand:DI 1 "register_operand" "=r,r")
+ (match_operand:SI 2 "const_int_operand" "I,n")] 3)]
+ "TARGET_V9 && TARGET_ARCH64"
+ "@
+ return %0+0\;mov %2,%Y1
+ sethi %%hi(%2),%1\;return %0+0\;or %Y1,%%lo(%2),%Y1"
+ [(set_attr "type" "misc")
+ (set_attr "length" "2,3")])
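+
+;; In both v9 variants above, alternative 0 handles a chain value that fits
+;; a 13-bit signed immediate (constraint "I") with a single mov in the
+;; return's delay slot, while alternative 1 builds a wider constant with
+;; sethi/or around the return, hence the extra length.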
+
+;; Pattern for use after a setjmp to store FP and the return register
+;; into the stack area.
+
+(define_expand "setjmp"
+ [(const_int 0)]
+ ""
+ "
+{
+ if (TARGET_ARCH64)
+ emit_insn (gen_setjmp_64 ());
+ else
+ emit_insn (gen_setjmp_32 ());
+ DONE;
+}")
+
+(define_expand "setjmp_32"
+ [(set (mem:SI (plus:SI (reg:SI 14) (const_int 56))) (match_dup 0))
+ (set (mem:SI (plus:SI (reg:SI 14) (const_int 60))) (reg:SI 31))]
+ ""
+ "
+{ operands[0] = frame_pointer_rtx; }")
+
+(define_expand "setjmp_64"
+ [(set (mem:DI (plus:DI (reg:DI 14) (const_int 112))) (match_dup 0))
+ (set (mem:DI (plus:DI (reg:DI 14) (const_int 120))) (reg:DI 31))]
+ ""
+ "
+{ operands[0] = frame_pointer_rtx; }")
+
+;; Special pattern for the FLUSH instruction.
+
+(define_insn "flush"
+ [(unspec_volatile [(match_operand 0 "memory_operand" "m")] 3)]
+ ""
+ "* return TARGET_V9 ? \"flush %f0\" : \"iflush %f0\";"
+ [(set_attr "type" "misc")])
+
+;; find first set.
+
+;; The scan instruction searches from the most significant bit while ffs
+;; searches from the least significant bit. The bit index and treatment of
+;; zero also differ. It takes at least 7 instructions to get the proper
+;; result.  Here is an obvious 8-instruction sequence.
+
+(define_insn "ffssi2"
+ [(set (match_operand:SI 0 "register_operand" "=&r")
+ (ffs:SI (match_operand:SI 1 "register_operand" "r")))
+ (clobber (match_scratch:SI 2 "=&r"))]
+ "TARGET_SPARCLITE || TARGET_SPARCLET"
+ "*
+{
+ if (TARGET_LIVE_G0)
+ output_asm_insn (\"and %%g0,0,%%g0\", operands);
+ return \"sub %%g0,%1,%0\;and %0,%1,%0\;scan %0,0,%0\;mov 32,%2\;sub %2,%0,%0\;sra %0,31,%2\;and %2,31,%2\;add %2,%0,%0\";
+}"
+ [(set_attr "type" "multi")
+ (set_attr "length" "8")])
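+
+;; An annotated reading of the sequence above (illustrative; it assumes
+;; scan yields 63 when no bit in the source matches):
+;;	sub %g0,%1,%0	! %0 = -x
+;;	and %0,%1,%0	! %0 = x & -x, isolating the lowest set bit
+;;	scan %0,0,%0	! position of that bit, counted from the MSB
+;;	mov 32,%2
+;;	sub %2,%0,%0	! 32 - position = 1-based index from the LSB
+;;	sra %0,31,%2	! %2 = -1 if x was 0 (the index went negative)
+;;	and %2,31,%2
+;;	add %2,%0,%0	! fold the x == 0 case to the required result 0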
+
+;; ??? This should be a define_expand, so that the extra instructions have
+;; a chance of being optimized away.
+
+;; Disabled because none of the UltraSparcs implement popc. The HAL R1
+;; does, but no one uses that and we don't have a switch for it.
+;
+;(define_insn "ffsdi2"
+; [(set (match_operand:DI 0 "register_operand" "=&r")
+; (ffs:DI (match_operand:DI 1 "register_operand" "r")))
+; (clobber (match_scratch:DI 2 "=&r"))]
+; "TARGET_ARCH64"
+; "neg %1,%2\;xnor %1,%2,%2\;popc %2,%0\;movzr %1,0,%0"
+; [(set_attr "type" "multi")
+; (set_attr "length" "4")])
+
+;; Split up troublesome insns for better scheduling.
+
+;; The following patterns are straightforward. They can be applied
+;; either before or after register allocation.
+
+(define_split
+ [(set (match_operand 0 "splittable_symbolic_memory_operand" "")
+ (match_operand 1 "reg_or_0_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "! flag_pic"
+ [(set (match_dup 2) (high:SI (match_dup 3)))
+ (set (match_dup 4) (match_dup 1))]
+ "
+{
+ operands[3] = XEXP (operands[0], 0);
+ operands[4] = gen_rtx_MEM (GET_MODE (operands[0]),
+ gen_rtx_LO_SUM (SImode, operands[2], operands[3]));
+ MEM_IN_STRUCT_P (operands[4]) = MEM_IN_STRUCT_P (operands[0]);
+ MEM_VOLATILE_P (operands[4]) = MEM_VOLATILE_P (operands[0]);
+ RTX_UNCHANGING_P (operands[4]) = RTX_UNCHANGING_P (operands[0]);
+}")
+
+(define_split
+ [(set (match_operand 0 "splittable_immediate_memory_operand" "")
+ (match_operand 1 "general_operand" ""))
+ (clobber (match_operand:SI 2 "register_operand" ""))]
+ "flag_pic"
+ [(set (match_dup 3) (match_dup 1))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[0], 0),
+ GET_MODE (operands[0]),
+ operands[2]);
+ operands[3] = gen_rtx_MEM (GET_MODE (operands[0]), addr);
+ MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[0]);
+ MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[0]);
+ RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[0]);
+}")
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operand 1 "splittable_immediate_memory_operand" ""))]
+ "flag_pic"
+ [(set (match_dup 0) (match_dup 2))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[1], 0),
+ GET_MODE (operands[1]),
+ operands[0]);
+ operands[2] = gen_rtx_MEM (GET_MODE (operands[1]), addr);
+ MEM_IN_STRUCT_P (operands[2]) = MEM_IN_STRUCT_P (operands[1]);
+ MEM_VOLATILE_P (operands[2]) = MEM_VOLATILE_P (operands[1]);
+ RTX_UNCHANGING_P (operands[2]) = RTX_UNCHANGING_P (operands[1]);
+}")
+
+;; Sign- and Zero-extend operations can have symbolic memory operands.
+
+(define_split
+ [(set (match_operand 0 "register_operand" "")
+ (match_operator 1 "extend_op" [(match_operand 2 "splittable_immediate_memory_operand" "")]))]
+ "flag_pic"
+ [(set (match_dup 0) (match_op_dup 1 [(match_dup 3)]))]
+ "
+{
+ rtx addr = legitimize_pic_address (XEXP (operands[2], 0),
+ GET_MODE (operands[2]),
+ operands[0]);
+ operands[3] = gen_rtx_MEM (GET_MODE (operands[2]), addr);
+ MEM_IN_STRUCT_P (operands[3]) = MEM_IN_STRUCT_P (operands[2]);
+ MEM_VOLATILE_P (operands[3]) = MEM_VOLATILE_P (operands[2]);
+ RTX_UNCHANGING_P (operands[3]) = RTX_UNCHANGING_P (operands[2]);
+}")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))]
+ "! flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST
+ || GET_CODE (operands[1]) == LABEL_REF)"
+ [(set (match_dup 0) (high:SI (match_dup 1)))
+ (set (match_dup 0)
+ (lo_sum:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+;; LABEL_REFs are not modified by `legitimize_pic_address'
+;; so do not recurse infinitely in the PIC case.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "immediate_operand" ""))]
+ "flag_pic && (GET_CODE (operands[1]) == SYMBOL_REF
+ || GET_CODE (operands[1]) == CONST)"
+ [(set (match_dup 0) (match_dup 1))]
+ "
+{
+ operands[1] = legitimize_pic_address (operands[1], Pmode, operands[0]);
+}")
+
+;; These split sne/seq insns. The forms of the resulting insns are
+;; somewhat bogus, but they avoid extra patterns and show data dependency.
+;; Nothing will look at these in detail after splitting has occurred.
+
+;; ??? v9 DImode versions are missing because addc and subc use %icc.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (ltu:SI (reg:CC 100) (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0)))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (geu:SI (reg:CC 100) (const_int 0)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (neg:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (neg:SI (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (ltu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (ne:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (ltu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (plus:SI (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))
+ (match_operand:SI 2 "register_operand" "")))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (plus:SI (geu:SI (reg:CC 100) (const_int 0))
+ (match_dup 2)))]
+ "")
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (minus:SI (match_operand:SI 2 "register_operand" "")
+ (eq:SI (match_operand:SI 1 "register_operand" "")
+ (const_int 0))))
+ (clobber (reg:CC 100))]
+ ""
+ [(set (reg:CC_NOOV 100) (compare:CC_NOOV (neg:SI (match_dup 1))
+ (const_int 0)))
+ (set (match_dup 0) (minus:SI (match_dup 2)
+ (geu:SI (reg:CC 100) (const_int 0))))]
+ "")
+
+;; Peepholes go at the end.
+
+;; Optimize consecutive loads or stores into ldd and std when possible.
+;; The conditions in which we do this are very restricted and are
+;; explained in the code for the {registers,addrs}_ok_for_ldd_peep functions.
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[1], 0))"
+ "stx %%g0,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (const_int 0))
+ (set (match_operand:SI 1 "memory_operand" "")
+ (const_int 0))]
+ "TARGET_V9
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[0], 0))"
+ "stx %%g0,%1")
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=rf")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:SI 2 "register_operand" "=rf")
+ (match_operand:SI 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
+ "ldd %1,%0")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "register_operand" "rf"))
+ (set (match_operand:SI 2 "memory_operand" "")
+ (match_operand:SI 3 "register_operand" "rf"))]
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+ "std %1,%0")
+
+(define_peephole
+ [(set (match_operand:SF 0 "register_operand" "=fr")
+ (match_operand:SF 1 "memory_operand" ""))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (match_operand:SF 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[0], operands[2])
+ && ! MEM_VOLATILE_P (operands[1]) && ! MEM_VOLATILE_P (operands[3])
+ && addrs_ok_for_ldd_peep (XEXP (operands[1], 0), XEXP (operands[3], 0))"
+ "ldd %1,%0")
+
+(define_peephole
+ [(set (match_operand:SF 0 "memory_operand" "")
+ (match_operand:SF 1 "register_operand" "fr"))
+ (set (match_operand:SF 2 "memory_operand" "")
+ (match_operand:SF 3 "register_operand" "fr"))]
+ "registers_ok_for_ldd_peep (operands[1], operands[3])
+ && ! MEM_VOLATILE_P (operands[0]) && ! MEM_VOLATILE_P (operands[2])
+ && addrs_ok_for_ldd_peep (XEXP (operands[0], 0), XEXP (operands[2], 0))"
+ "std %1,%0")
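+
+;; Two sequences are needed above because the default one loads 1 into
+;; %H0 first, which would destroy the shift count if it happens to live
+;; in that register; the first sequence stages everything through %L0 and
+;; writes %H0 only at the end.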
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=rf")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_operand:SI 2 "register_operand" "=rf")
+ (match_operand:SI 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd %3,%2")
+
+(define_peephole
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (match_operand:SI 1 "register_operand" "rf"))
+ (set (match_operand:SI 2 "memory_operand" "")
+ (match_operand:SI 3 "register_operand" "rf"))]
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std %3,%2")
+
+(define_peephole
+ [(set (match_operand:SF 0 "register_operand" "=fr")
+ (match_operand:SF 1 "memory_operand" ""))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (match_operand:SF 3 "memory_operand" ""))]
+ "registers_ok_for_ldd_peep (operands[2], operands[0])
+ && ! MEM_VOLATILE_P (operands[3]) && ! MEM_VOLATILE_P (operands[1])
+ && addrs_ok_for_ldd_peep (XEXP (operands[3], 0), XEXP (operands[1], 0))"
+ "ldd %3,%2")
+
+(define_peephole
+ [(set (match_operand:SF 0 "memory_operand" "")
+ (match_operand:SF 1 "register_operand" "fr"))
+ (set (match_operand:SF 2 "memory_operand" "")
+ (match_operand:SF 3 "register_operand" "fr"))]
+ "registers_ok_for_ldd_peep (operands[3], operands[1])
+ && ! MEM_VOLATILE_P (operands[2]) && ! MEM_VOLATILE_P (operands[0])
+ && addrs_ok_for_ldd_peep (XEXP (operands[2], 0), XEXP (operands[0], 0))"
+ "std %3,%2")
+
+;; Optimize the case of following a reg-reg move with a test
+;; of reg just moved. Don't allow floating point regs for operand 0 or 1.
+;; This can result from a float to fix conversion.
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (match_operand:SI 1 "register_operand" "r"))
+ (set (reg:CC 100)
+ (compare:CC (match_operand:SI 2 "register_operand" "r")
+ (const_int 0)))]
+ "(rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[2], operands[1]))
+ && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
+ "orcc %1,0,%0")
+
+(define_peephole
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (match_operand:DI 1 "register_operand" "r"))
+ (set (reg:CCX 100)
+ (compare:CCX (match_operand:DI 2 "register_operand" "r")
+ (const_int 0)))]
+ "TARGET_ARCH64
+ && (rtx_equal_p (operands[2], operands[0])
+ || rtx_equal_p (operands[2], operands[1]))
+ && ! FP_REG_P (operands[0]) && ! FP_REG_P (operands[1])"
+ "orcc %1,0,%0")
+
+;; Floating-point move peepholes
+;; ??? v9: Do we want similar ones?
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_dup 0)
+ (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:DF 2 "register_operand" "=er")
+ (mem:DF (match_dup 0)))]
+ "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
+ "*
+{
+ /* Go by way of output_move_double in case the register in operand 2
+ is not properly aligned for ldd. */
+ operands[1] = gen_rtx_MEM (DFmode,
+ gen_rtx_LO_SUM (SImode, operands[0], operands[1]));
+ operands[0] = operands[2];
+ return output_move_double (operands);
+}")
+
+(define_peephole
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (lo_sum:SI (match_dup 0)
+ (match_operand:SI 1 "immediate_operand" "i")))
+ (set (match_operand:SF 2 "register_operand" "=fr")
+ (mem:SF (match_dup 0)))]
+ "RTX_UNCHANGING_P (operands[1]) && reg_unused_after (operands[0], insn)"
+ "ld [%0+%%lo(%a1)],%2")
+
+;; Return peepholes. First the "normal" ones.
+;; These are necessary to catch insns ending up in the epilogue delay list.
+
+(define_insn "*return_qi"
+ [(set (match_operand:QI 0 "restore_operand" "")
+ (match_operand:QI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_hi"
+ [(set (match_operand:HI 0 "restore_operand" "")
+ (match_operand:HI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_si"
+ [(set (match_operand:SI 0 "restore_operand" "")
+ (match_operand:SI 1 "arith_operand" "rI"))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && (GET_CODE (operands[1]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[1])))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+;; The following pattern is only generated by delayed-branch scheduling,
+;; when the insn winds up in the epilogue. This can only happen when
+;; ! TARGET_FPU because otherwise fp return values are in %f0.
+(define_insn "*return_sf_no_fpu"
+ [(set (match_operand:SF 0 "restore_operand" "r")
+ (match_operand:SF 1 "register_operand" "r"))
+ (return)]
+ "! TARGET_FPU && ! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %%g0,%1,%Y0\";
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1]))
+ return \"return %%i7+8\;mov %Y1,%Y0\";
+ else
+ return \"ret\;restore %%g0,%1,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_addsi"
+ [(set (match_operand:SI 0 "restore_operand" "")
+ (plus:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "arith_operand" "rI")))
+ (return)]
+ "! TARGET_EPILOGUE && ! TARGET_LIVE_G0"
+ "*
+{
+ if (! TARGET_ARCH64 && current_function_returns_struct)
+ return \"jmp %%i7+12\;restore %r1,%2,%Y0\";
+  /* If operands are global or in registers, we can use return.  */
+ else if (TARGET_V9 && IN_OR_GLOBAL_P (operands[1])
+ && (GET_CODE (operands[2]) == CONST_INT
+ || IN_OR_GLOBAL_P (operands[2])))
+ return \"return %%i7+8\;add %Y1,%Y2,%Y0\";
+ else
+ return \"ret\;restore %r1,%2,%Y0\";
+}"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_di"
+ [(set (match_operand:DI 0 "restore_operand" "")
+ (match_operand:DI 1 "arith_double_operand" "rHI"))
+ (return)]
+ "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "ret\;restore %%g0,%1,%Y0"
+ [(set_attr "type" "multi")])
+
+(define_insn "*return_adddi"
+ [(set (match_operand:DI 0 "restore_operand" "")
+ (plus:DI (match_operand:DI 1 "arith_operand" "%r")
+ (match_operand:DI 2 "arith_double_operand" "rHI")))
+ (return)]
+ "TARGET_ARCH64 && ! TARGET_EPILOGUE"
+ "ret\;restore %r1,%2,%Y0"
+ [(set_attr "type" "multi")])
+
+;; The following pattern is only generated by delayed-branch scheduling,
+;; when the insn winds up in the epilogue.
+(define_insn "*return_sf"
+ [(set (reg:SF 32)
+ (match_operand:SF 0 "register_operand" "f"))
+ (return)]
+ "! TARGET_EPILOGUE"
+ "ret\;fmovs %0,%%f0"
+ [(set_attr "type" "multi")])
+
+;; Now peepholes to do a call followed by a jump.
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:SI 1 "call_operand_address" "ps"))
+ (match_operand 2 "" "")))
+ (clobber (reg:SI 15))])
+ (set (pc) (label_ref (match_operand 3 "" "")))]
+ "short_branch (INSN_UID (insn), INSN_UID (operands[3])) && in_same_eh_region (insn, operands[3]) && in_same_eh_region (insn, ins1)"
+ "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
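+
+;; The add in the delay slot retargets %o7: the callee returns to %o7+8,
+;; so adding (%l3-.-4) makes that return land exactly on label %l3,
+;; folding the following jump into the call.  The three variants below
+;; play the same trick.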
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:SI 0 "call_operand_address" "ps"))
+ (match_operand 1 "" ""))
+ (clobber (reg:SI 15))])
+ (set (pc) (label_ref (match_operand 2 "" "")))]
+ "short_branch (INSN_UID (insn), INSN_UID (operands[2])) && in_same_eh_region (insn, operands[2]) && in_same_eh_region (insn, ins1)"
+ "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
+
+(define_peephole
+ [(parallel [(set (match_operand 0 "" "")
+ (call (mem:SI (match_operand:DI 1 "call_operand_address" "ps"))
+ (match_operand 2 "" "")))
+ (clobber (reg:DI 15))])
+ (set (pc) (label_ref (match_operand 3 "" "")))]
+ "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[3])) && in_same_eh_region (insn, operands[3]) && in_same_eh_region (insn, ins1)"
+ "call %a1,%2\;add %%o7,(%l3-.-4),%%o7")
+
+(define_peephole
+ [(parallel [(call (mem:SI (match_operand:DI 0 "call_operand_address" "ps"))
+ (match_operand 1 "" ""))
+ (clobber (reg:DI 15))])
+ (set (pc) (label_ref (match_operand 2 "" "")))]
+ "TARGET_ARCH64 && short_branch (INSN_UID (insn), INSN_UID (operands[2])) && in_same_eh_region (insn, operands[2]) && in_same_eh_region (insn, ins1)"
+ "call %a0,%1\;add %%o7,(%l2-.-4),%%o7")
+
+;; After a nonlocal goto, we need to restore the PIC register, but only
+;; if we need it. So do nothing much here, but we'll check for this in
+;; finalize_pic.
+
+(define_insn "nonlocal_goto_receiver"
+ [(unspec_volatile [(const_int 0)] 4)]
+ "flag_pic"
+ "")
+
+(define_insn "trap"
+ [(trap_if (const_int 1) (const_int 5))]
+ ""
+ "ta 5"
+ [(set_attr "type" "misc")])
+
+(define_expand "conditional_trap"
+ [(trap_if (match_operator 0 "noov_compare_op"
+ [(match_dup 2) (match_dup 3)])
+ (match_operand:SI 1 "arith_operand" ""))]
+ ""
+ "operands[2] = gen_compare_reg (GET_CODE (operands[0]),
+ sparc_compare_op0, sparc_compare_op1);
+ operands[3] = const0_rtx;")
+
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CC 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
+ ""
+ "t%C0 %1"
+ [(set_attr "type" "misc")])
+
+(define_insn ""
+ [(trap_if (match_operator 0 "noov_compare_op" [(reg:CCX 100) (const_int 0)])
+ (match_operand:SI 1 "arith_operand" "rM"))]
+ "TARGET_V9"
+ "t%C0 %%xcc,%1"
+ [(set_attr "type" "misc")])
+
diff --git a/contrib/gcc/config/sparc/splet.h b/contrib/gcc/config/sparc/splet.h
new file mode 100644
index 0000000..23c6414
--- /dev/null
+++ b/contrib/gcc/config/sparc/splet.h
@@ -0,0 +1,53 @@
+/* Definitions of target machine for GNU compiler, for SPARClet.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Doug Evans (dje@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* -mbroken-saverestore is not included here because the long term
+ default is -mno-broken-saverestore. */
+#undef TARGET_DEFAULT
+#define TARGET_DEFAULT (MASK_APP_REGS + MASK_EPILOGUE)
+
+/* -mlive-g0 is only supported on the sparclet. */
+#undef SUBTARGET_SWITCHES
+#define SUBTARGET_SWITCHES \
+{"big-endian", -MASK_LITTLE_ENDIAN}, \
+{"little-endian", MASK_LITTLE_ENDIAN}, \
+{"live-g0", MASK_LIVE_G0}, \
+{"no-live-g0", -MASK_LIVE_G0}, \
+{"broken-saverestore", MASK_BROKEN_SAVERESTORE}, \
+{"no-broken-saverestore", -MASK_BROKEN_SAVERESTORE},
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{mlittle-endian:-EL} %(asm_cpu)"
+
+/* Require the user to supply crt0.o. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{mlittle-endian:-EL}"
+
+/* sparclet chips are bi-endian. */
+#undef BYTES_BIG_ENDIAN
+#define BYTES_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
+#undef WORDS_BIG_ENDIAN
+#define WORDS_BIG_ENDIAN (! TARGET_LITTLE_ENDIAN)
diff --git a/contrib/gcc/config/sparc/sun4gas.h b/contrib/gcc/config/sparc/sun4gas.h
new file mode 100644
index 0000000..3cea956
--- /dev/null
+++ b/contrib/gcc/config/sparc/sun4gas.h
@@ -0,0 +1,27 @@
+/* Definitions of target machine for GNU compiler, for SunOS 4.x with gas
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* gas supports unaligned data. */
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+#define UNALIGNED_INT_ASM_OP ".uaword"
+#define UNALIGNED_SHORT_ASM_OP ".uahalf"
+
+/* defaults.h will define DWARF2_UNWIND_INFO for us. */
+#undef DWARF2_UNWIND_INFO
diff --git a/contrib/gcc/config/sparc/sun4o3.h b/contrib/gcc/config/sparc/sun4o3.h
new file mode 100644
index 0000000..10c7391
--- /dev/null
+++ b/contrib/gcc/config/sparc/sun4o3.h
@@ -0,0 +1,29 @@
+#include "sparc/sparc.h"
+
+#undef FUNCTION_PROFILER
+#define FUNCTION_PROFILER(FILE, LABELNO) \
+ fprintf (FILE, "\tsethi %%hi(LP%d),%%o0\n\tcall .mcount\n\tor %%lo(LP%d),%%o0,%%o0\n", \
+ (LABELNO), (LABELNO))
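+
+/* For a hypothetical LABELNO of 4 this emits
+	sethi %hi(LP4),%o0
+	call .mcount
+	or %lo(LP4),%o0,%o0
+   loading the address of the count label into %o0, with the low half
+   filled in from the call's delay slot.  */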
+
+/* LINK_SPEC is needed only for SunOS 4. */
+
+#undef LINK_SPEC
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
diff --git a/contrib/gcc/config/sparc/sunos4.h b/contrib/gcc/config/sparc/sunos4.h
new file mode 100644
index 0000000..14c7a43
--- /dev/null
+++ b/contrib/gcc/config/sparc/sunos4.h
@@ -0,0 +1,49 @@
+/* Definitions of target machine for GNU compiler, for SunOS 4.x
+ Copyright (C) 1994 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#define SUNOS4_SHARED_LIBRARIES 1
+
+/* Use N_BINCL stabs. */
+
+#define DBX_USE_BINCL
+
+#include "sparc/sparc.h"
+
+/* The Sun as doesn't like unaligned data. */
+#define DWARF2_UNWIND_INFO 0
+
+/* Override MACHINE_STATE_{SAVE,RESTORE} because we have special
+ traps available which can get and set the condition codes
+ reliably. */
+#undef MACHINE_STATE_SAVE
+#define MACHINE_STATE_SAVE(ID) \
+ unsigned long int ms_flags, ms_saveret; \
+ asm volatile("ta 0x20\n\t" \
+ "mov %%g1, %0\n\t" \
+ "mov %%g2, %1\n\t" \
+ : "=r" (ms_flags), "=r" (ms_saveret));
+
+#undef MACHINE_STATE_RESTORE
+#define MACHINE_STATE_RESTORE(ID) \
+ asm volatile("mov %0, %%g1\n\t" \
+ "mov %1, %%g2\n\t" \
+ "ta 0x21\n\t" \
+ : /* no outputs */ \
+ : "r" (ms_flags), "r" (ms_saveret));
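+
+/* A minimal usage sketch, assuming the getcc/setcc traps (ta 0x20 and
+   ta 0x21) behave as described above:
+
+	{
+	  MACHINE_STATE_SAVE (0);
+	  ... code that may disturb the condition codes ...
+	  MACHINE_STATE_RESTORE (0);
+	}
+
+   The trap results are staged through %g1/%g2 into the ms_flags and
+   ms_saveret locals that MACHINE_STATE_SAVE declares.  */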
diff --git a/contrib/gcc/config/sparc/sysv4.h b/contrib/gcc/config/sparc/sysv4.h
new file mode 100644
index 0000000..7e90bdd
--- /dev/null
+++ b/contrib/gcc/config/sparc/sysv4.h
@@ -0,0 +1,231 @@
+/* Target definitions for GNU compiler for Sparc running System V.4
+ Copyright (C) 1991, 92, 95, 96, 97, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@monkeys.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/sparc.h"
+
+/* Undefine some symbols which are defined in "sparc.h" but which are
+ appropriate only for SunOS 4.x, and not for svr4. */
+
+#undef WORD_SWITCH_TAKES_ARG
+#undef ASM_OUTPUT_SOURCE_LINE
+#undef SELECT_SECTION
+#undef ASM_DECLARE_FUNCTION_NAME
+#undef TEXT_SECTION_ASM_OP
+#undef DATA_SECTION_ASM_OP
+
+#include "svr4.h"
+
+/* ??? Put back the SIZE_TYPE/PTRDIFF_TYPE definitions set by sparc.h.
+ Why, exactly, is svr4.h messing with this? Seems like the chip
+ would know best. */
+
+#undef SIZE_TYPE
+#define SIZE_TYPE (TARGET_ARCH64 ? "long unsigned int" : "unsigned int")
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE (TARGET_ARCH64 ? "long int" : "int")
+
+/* Undefine some symbols which are defined in "svr4.h" and are
+   appropriate for typical svr4 systems, but not for the specific
+   case of svr4 running on a Sparc.  */
+
+#undef INIT_SECTION_ASM_OP
+#undef FINI_SECTION_ASM_OP
+#undef CONST_SECTION_ASM_OP
+#undef TYPE_OPERAND_FMT
+#undef PUSHSECTION_FORMAT
+#undef STRING_ASM_OP
+#undef COMMON_ASM_OP
+#undef SKIP_ASM_OP
+#undef SET_ASM_OP /* Has no equivalent. See ASM_OUTPUT_DEF below. */
+
+/* Provide a set of pre-definitions and pre-assertions appropriate for
+ the Sparc running svr4. __svr4__ is our extension. */
+
+#define CPP_PREDEFINES \
+"-Dsparc -Dunix -D__svr4__ -Asystem(unix) -Asystem(svr4)"
+
+/* The native assembler can't compute differences between symbols in different
+ sections when generating pic code, so we must put jump tables in the
+ text section. */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* Pass -K to the assembler when PIC. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{v:-V} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Yd,*} %{Wa,*:%*} \
+ %{fpic:-K PIC} %{fPIC:-K PIC} %(asm_cpu)"
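+
+/* So, for example, compiling with -fpic or -fPIC adds "-K PIC" to the
+   assembler command line, alongside the %(asm_cpu) cpu selection.  */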
+
+/* Must use data section for relocatable constants when pic. */
+#undef SELECT_RTX_SECTION
+#define SELECT_RTX_SECTION(MODE,RTX) \
+{ \
+ if (flag_pic && symbolic_operand (RTX)) \
+ data_section (); \
+ else \
+ const_section (); \
+}
+
+/* Define the names of various pseudo-op used by the Sparc/svr4 assembler.
+ Note that many of these are different from the typical pseudo-ops used
+ by most svr4 assemblers. That is probably due to a (misguided?) attempt
+ to keep the Sparc/svr4 assembler somewhat compatible with the Sparc/SunOS
+ assembler. */
+
+#define STRING_ASM_OP ".asciz"
+#define COMMON_ASM_OP ".common"
+#define SKIP_ASM_OP ".skip"
+#define UNALIGNED_DOUBLE_INT_ASM_OP ".uaxword"
+#define UNALIGNED_INT_ASM_OP ".uaword"
+#define UNALIGNED_SHORT_ASM_OP ".uahalf"
+#define PUSHSECTION_ASM_OP ".pushsection"
+#define POPSECTION_ASM_OP ".popsection"
+
+/* This is defined in sparc.h but is not used by svr4.h. */
+#undef ASM_LONG
+#define ASM_LONG ".long"
+
+/* This is the format used to print the second operand of a .type pseudo-op
+ for the Sparc/svr4 assembler. */
+
+#define TYPE_OPERAND_FMT "#%s"
+
+/* This is the format used to print a .pushsection pseudo-op (and its operand)
+ for the Sparc/svr4 assembler. */
+
+#define PUSHSECTION_FORMAT "\t%s\t\"%s\"\n"
+
+#undef ASM_OUTPUT_CASE_LABEL
+#define ASM_OUTPUT_CASE_LABEL(FILE, PREFIX, NUM, JUMPTABLE) \
+do { ASM_OUTPUT_ALIGN ((FILE), Pmode == SImode ? 2 : 3); \
+ ASM_OUTPUT_INTERNAL_LABEL ((FILE), PREFIX, NUM); \
+ } while (0)
+
+/* This is how to equate one symbol to another symbol. The syntax used is
+ `SYM1=SYM2'. Note that this is different from the way equates are done
+ with most svr4 assemblers, where the syntax is `.set SYM1,SYM2'. */
+
+#define ASM_OUTPUT_DEF(FILE,LABEL1,LABEL2) \
+ do { fprintf ((FILE), "\t"); \
+ assemble_name (FILE, LABEL1); \
+ fprintf (FILE, " = "); \
+ assemble_name (FILE, LABEL2); \
+ fprintf (FILE, "\n"); \
+ } while (0)
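+
+/* For instance, with the hypothetical symbols "alias" and "target" this
+   emits the line
+	alias = target
+   rather than the more common ".set alias,target".  */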
+
+/* Define how the Sparc registers should be numbered for Dwarf output.
+ The numbering provided here should be compatible with the native
+ svr4 SDB debugger in the Sparc/svr4 reference port. The numbering
+ is as follows:
+
+ Assembly name gcc internal regno Dwarf regno
+ ----------------------------------------------------------
+ g0-g7 0-7 0-7
+ o0-o7 8-15 8-15
+ l0-l7 16-23 16-23
+ i0-i7 24-31 24-31
+ f0-f31 32-63 40-71
+*/
+
+#define DBX_REGISTER_NUMBER(REGNO) ((REGNO) < 32 ? (REGNO) : (REGNO) + 8)
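+
+/* For example, %o0 (gcc regno 8) keeps Dwarf regno 8, while %f0 (gcc
+   regno 32) maps to 32 + 8 = 40, matching the table above.  */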
+
+/* A set of symbol definitions for assembly pseudo-ops which will
+ get us switched to various sections of interest. These are used
+ in all places where we simply want to switch to a section, and
+ *not* to push the previous section name onto the assembler's
+ section names stack (as we do often in dwarfout.c). */
+
+#define TEXT_SECTION_ASM_OP ".section\t\".text\""
+#define DATA_SECTION_ASM_OP ".section\t\".data\""
+#define BSS_SECTION_ASM_OP ".section\t\".bss\""
+#define CONST_SECTION_ASM_OP ".section\t\".rodata\""
+#define INIT_SECTION_ASM_OP ".section\t\".init\""
+#define FINI_SECTION_ASM_OP ".section\t\".fini\""
+
+/* Define the pseudo-ops used to switch to the .ctors and .dtors sections.
+
+ Note that we want to give these sections the SHF_WRITE attribute
+ because these sections will actually contain data (i.e. tables of
+ addresses of functions in the current root executable or shared library
+ file) and, in the case of a shared library, the relocatable addresses
+ will have to be properly resolved/relocated (and then written into) by
+ the dynamic linker when it actually attaches the given shared library
+ to the executing process. (Note that on SVR4, you may wish to use the
+ `-z text' option to the ELF linker, when building a shared library, as
+ an additional check that you are doing everything right. But if you do
+ use the `-z text' option when building a shared library, you will get
+ errors unless the .ctors and .dtors sections are marked as writable
+ via the SHF_WRITE attribute.) */
+
+#undef CTORS_SECTION_ASM_OP
+#define CTORS_SECTION_ASM_OP ".section\t\".ctors\",#alloc,#write"
+#undef DTORS_SECTION_ASM_OP
+#define DTORS_SECTION_ASM_OP ".section\t\".dtors\",#alloc,#write"
+#undef EH_FRAME_SECTION_ASM_OP
+#define EH_FRAME_SECTION_ASM_OP ".section\t\".eh_frame\",#alloc,#write"
+
+/* A C statement to output something to the assembler file to switch to section
+ NAME for object DECL which is either a FUNCTION_DECL, a VAR_DECL or
+ NULL_TREE. Some target formats do not support arbitrary sections. Do not
+ define this macro in such cases. */
+
+#undef ASM_OUTPUT_SECTION_NAME /* Override svr4.h's definition. */
+#define ASM_OUTPUT_SECTION_NAME(FILE, DECL, NAME, RELOC) \
+do { \
+ if ((DECL) && TREE_CODE (DECL) == FUNCTION_DECL) \
+ fprintf (FILE, ".section\t\"%s%s\",#alloc,#execinstr\n", \
+ flag_function_sections ? ".text%" : "", (NAME)); \
+ else if ((DECL) && DECL_READONLY_SECTION (DECL, RELOC)) \
+ fprintf (FILE, ".section\t\"%s\",#alloc\n", (NAME)); \
+ else \
+ fprintf (FILE, ".section\t\"%s\",#alloc,#write\n", (NAME)); \
+} while (0)
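+
+/* For a hypothetical section named "mysect", a function lands in
+	.section	"mysect",#alloc,#execinstr
+   read-only data gets only #alloc, and writable data #alloc,#write,
+   using the Sun-style "#" attribute syntax seen above.  */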
+
+/* Output assembler code to FILE to initialize this source file's
+ basic block profiling info, if that has not already been done. */
+
+#undef FUNCTION_BLOCK_PROFILER
+#define FUNCTION_BLOCK_PROFILER(FILE, LABELNO) \
+ do { \
+ fprintf (FILE, "\tsethi %%hi(.LLPBX0),%%o0\n\tld [%%lo(.LLPBX0)+%%o0],%%o1\n\ttst %%o1\n\tbne LPY%d\n\tadd %%o0,%%lo(.LLPBX0),%%o0\n\tcall __bb_init_func\n\tnop\nLPY%d:\n", \
+ (LABELNO), (LABELNO)); \
+ } while (0)
+
+/* Output assembler code to FILE to increment the entry-count for
+ the BLOCKNO'th basic block in this source file. */
+
+#undef BLOCK_PROFILER
+#define BLOCK_PROFILER(FILE, BLOCKNO) \
+{ \
+ int blockn = (BLOCKNO); \
+ fprintf (FILE, "\tsethi %%hi(.LLPBX2+%d),%%g1\n\tld [%%lo(.LLPBX2+%d)+%%g1],%%g2\n\
+\tadd %%g2,1,%%g2\n\tst %%g2,[%%lo(.LLPBX2+%d)+%%g1]\n", \
+ 4 * blockn, 4 * blockn, 4 * blockn); \
+}
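+
+/* For BLOCKNO == 0 this emits
+	sethi %hi(.LLPBX2+0),%g1
+	ld [%lo(.LLPBX2+0)+%g1],%g2
+	add %g2,1,%g2
+	st %g2,[%lo(.LLPBX2+0)+%g1]
+   a load/increment/store of the counter at byte offset 4*BLOCKNO in the
+   count table.  */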
+
+/* A C statement (sans semicolon) to output to the stdio stream
+ FILE the assembler definition of uninitialized global DECL named
+ NAME whose size is SIZE bytes and alignment is ALIGN bytes.
+ Try to use asm_output_aligned_bss to implement this macro. */
+
+#undef ASM_OUTPUT_ALIGNED_BSS
+#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \
+ asm_output_aligned_bss (FILE, DECL, NAME, SIZE, ALIGN)
diff --git a/contrib/gcc/config/sparc/t-elf b/contrib/gcc/config/sparc/t-elf
new file mode 100644
index 0000000..da9df38
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-elf
@@ -0,0 +1,39 @@
+# we need to supply our own assembly versions of libgcc1.c files,
+# since the user may not have native 'cc' available
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# crt0 is built elsewhere
+LIBGCC1_TEST =
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
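+# The generated fp-bit.c thus begins with `#define FLOAT' and builds
+# the single-precision routines; dp-bit.c is a plain copy and builds
+# the double-precision set.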
+
+# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
+# gas build...
+#MULTILIB_OPTIONS = msoft-float mcpu=v8
+MULTILIB_OPTIONS = msoft-float
+#MULTILIB_DIRNAMES = soft v8
+MULTILIB_DIRNAMES = soft
+#MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
+MULTILIB_MATCHES = msoft-float=mno-fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# Assemble startup files.
+crti.o: $(srcdir)/config/sparc/sol2-ci.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crti.o -x assembler $(srcdir)/config/sparc/sol2-ci.asm
+crtn.o: $(srcdir)/config/sparc/sol2-cn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtn.o -x assembler $(srcdir)/config/sparc/sol2-cn.asm
diff --git a/contrib/gcc/config/sparc/t-sol2 b/contrib/gcc/config/sparc/t-sol2
new file mode 100644
index 0000000..d41254a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sol2
@@ -0,0 +1,30 @@
+# we need to supply our own assembly versions of libgcc1.c files,
+# since the user may not have native 'cc' available
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
+
+# gmon build rule:
+gmon.o: $(srcdir)/config/sparc/gmon-sol2.c $(GCC_PASSES) $(CONFIG_H) stmp-int-hdrs
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(INCLUDES) \
+ -c $(srcdir)/config/sparc/gmon-sol2.c -o gmon.o
+
+# Assemble startup files.
+crt1.o: $(srcdir)/config/sparc/sol2-c1.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crt1.o -x assembler $(srcdir)/config/sparc/sol2-c1.asm
+crti.o: $(srcdir)/config/sparc/sol2-ci.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crti.o -x assembler $(srcdir)/config/sparc/sol2-ci.asm
+crtn.o: $(srcdir)/config/sparc/sol2-cn.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o crtn.o -x assembler $(srcdir)/config/sparc/sol2-cn.asm
+gcrt1.o: $(srcdir)/config/sparc/sol2-g1.asm $(GCC_PASSES)
+ $(GCC_FOR_TARGET) -c -o gcrt1.o -x assembler $(srcdir)/config/sparc/sol2-g1.asm
+
+# We need to use -fPIC when we are using gcc to compile the routines in
+# crtstuff.c. This is only really needed when we are going to use gcc/g++
+# to produce a shared library, but since we don't know ahead of time when
+# we will be doing that, we just always use -fPIC when compiling the
+# routines in crtstuff.c.
+
+CRTSTUFF_T_CFLAGS = -fPIC
+TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/sparc/t-sp64 b/contrib/gcc/config/sparc/t-sp64
new file mode 100644
index 0000000..99acd5d
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sp64
@@ -0,0 +1,2 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
diff --git a/contrib/gcc/config/sparc/t-sparcbare b/contrib/gcc/config/sparc/t-sparcbare
new file mode 100644
index 0000000..8bd978b
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sparcbare
@@ -0,0 +1,26 @@
+# configuration file for a bare sparc cpu
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+# MULTILIB_OPTIONS should have msparclite too, but we'd have to make
+# gas build...
+MULTILIB_OPTIONS = msoft-float mcpu=v8
+MULTILIB_DIRNAMES = soft v8
+MULTILIB_MATCHES = msoft-float=mno-fpu mcpu?v8=mv8
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-sparclite b/contrib/gcc/config/sparc/t-sparclite
new file mode 100644
index 0000000..7cdfbb0
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sparclite
@@ -0,0 +1,24 @@
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spl.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' > dp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ echo '#define US_SOFTWARE_GOFAST' >> fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
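+# US_SOFTWARE_GOFAST makes fp-bit.c use the US Software GOFAST
+# entry-point names in place of the default libgcc ones.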
+
+MULTILIB_OPTIONS = mfpu mflat
+MULTILIB_DIRNAMES =
+MULTILIB_MATCHES = mfpu=mhard-float mfpu=mcpu?f934
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-splet b/contrib/gcc/config/sparc/t-splet
new file mode 100644
index 0000000..3409f5d
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-splet
@@ -0,0 +1,23 @@
+# configuration file for a bare sparclet cpu, aout format files
+
+CROSS_LIBGCC1 = libgcc1-asm.a
+LIB1ASMSRC = sparc/lb1spc.asm
+LIB1ASMFUNCS = _mulsi3 _divsi3 _modsi3
+
+# These are really part of libgcc1, but this will cause them to be
+# built correctly, so...
+
+LIB2FUNCS_EXTRA = fp-bit.c dp-bit.c
+
+dp-bit.c: $(srcdir)/config/fp-bit.c
+ cat $(srcdir)/config/fp-bit.c > dp-bit.c
+
+fp-bit.c: $(srcdir)/config/fp-bit.c
+ echo '#define FLOAT' > fp-bit.c
+ cat $(srcdir)/config/fp-bit.c >> fp-bit.c
+
+MULTILIB_OPTIONS = mlittle-endian mlive-g0 mbroken-saverestore
+MULTILIB_DIRNAMES = little live-g0 brknsave
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/t-sunos40 b/contrib/gcc/config/sparc/t-sunos40
new file mode 100644
index 0000000..3e10575
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sunos40
@@ -0,0 +1,7 @@
+# SunOS 4.0.*
+# /bin/as doesn't recognize the v8 instructions, so we can't do a v8
+# multilib build.
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
diff --git a/contrib/gcc/config/sparc/t-sunos41 b/contrib/gcc/config/sparc/t-sunos41
new file mode 100644
index 0000000..5783d6a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-sunos41
@@ -0,0 +1,16 @@
+# SunOS 4.1.*
+
+LIBGCC1 =
+CROSS_LIBGCC1 =
+LIBGCC1_TEST =
+
+MULTILIB_OPTIONS = fpic/fPIC mcpu=v8
+MULTILIB_DIRNAMES = pic ucpic v8
+MULTILIB_MATCHES = mcpu?v8=mv8
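+# (fpic/fPIC are mutually exclusive alternatives, so together with the
+# optional mcpu=v8 this builds pic, ucpic and v8 variants and their
+# combinations.)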
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
+
+# The native linker doesn't handle linking -fpic code with -fPIC code. Ugh.
+# We cope by building both variants of libgcc.
+#TARGET_LIBGCC2_CFLAGS = -fPIC
diff --git a/contrib/gcc/config/sparc/t-vxsparc b/contrib/gcc/config/sparc/t-vxsparc
new file mode 100644
index 0000000..0c7a14a
--- /dev/null
+++ b/contrib/gcc/config/sparc/t-vxsparc
@@ -0,0 +1,17 @@
+LIBGCC1 =
+CROSS_LIBGCC1 =
+
+# We don't want to build .umul, etc., because VxWorks provides them;
+# as a result libgcc1-test would fail, so disable it.
+LIBGCC1_TEST =
+
+# We don't want to put exit in libgcc.a for VxWorks, because VxWorks
+# does not have _exit.
+TARGET_LIBGCC2_CFLAGS = -Dexit=unused_exit
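+# (The -Dexit=unused_exit renames libgcc2's definition of exit, so
+# libgcc.a neither provides exit nor pulls in a reference to the
+# missing _exit.)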
+
+MULTILIB_OPTIONS=msoft-float mv8
+MULTILIB_DIRNAMES=soft v8
+MULTILIB_MATCHES=msoft-float=mno-fpu
+
+LIBGCC = stmp-multilib
+INSTALL_LIBGCC = install-multilib
diff --git a/contrib/gcc/config/sparc/vxsim.h b/contrib/gcc/config/sparc/vxsim.h
new file mode 100644
index 0000000..6c80375
--- /dev/null
+++ b/contrib/gcc/config/sparc/vxsim.h
@@ -0,0 +1,131 @@
+/* Definitions of target machine for GNU compiler, for SPARC VxSim
+ Copyright 1996 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+/* Supposedly the same as vanilla sparc svr4, except for the stuff below: */
+#include "sparc/sysv4.h"
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES \
+ "-DCPU=SIMSPARCSOLARIS -D__vxworks -D__vxworks__ -Dsparc -D__svr4__ -D__SVR4 \
+ -Asystem(embedded) -Asystem(svr4) -Acpu(sparc) -Amachine(sparc)\
+ -D__GCC_NEW_VARARGS__"
+
+#undef CPP_SPEC
+#define CPP_SPEC ""
+
+#undef CC1_SPEC
+#define CC1_SPEC "-fno-builtin %{sun4:} %{target:}"
+
+/* The sun bundled assembler doesn't accept -Yd, (and neither does gas).
+ It's safe to pass -s always, even if -g is not used. */
+#undef ASM_SPEC
+#define ASM_SPEC \
+ "%{V} %{v:%{!V:-V}} %{Qy:} %{!Qn:-Qy} %{n} %{T} %{Ym,*} %{Wa,*:%*} -s \
+ %{fpic:-K PIC} %{fPIC:-K PIC}"
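+
+/* (Spec syntax: %{fpic:-K PIC} passes `-K PIC' to the assembler when
+   -fpic was given, %{Wa,*:%*} forwards -Wa, arguments, and -s is
+   always passed.)  */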
+
+/* It appears that Solaris 2.0 uses the same reg numbering as
+   the old BSD-style system did. */
+
+#undef DBX_REGISTER_NUMBER
+/* Same as sparc.h */
+#define DBX_REGISTER_NUMBER(REGNO) (REGNO)
+
+/* We use stabs-in-elf for debugging, because that is what the native
+ toolchain uses. */
+#undef PREFERRED_DEBUGGING_TYPE
+#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+
+/* The Solaris 2 assembler uses .skip, not .zero, so put this back. */
+#undef ASM_OUTPUT_SKIP
+#define ASM_OUTPUT_SKIP(FILE,SIZE) \
+ fprintf (FILE, "\t.skip %u\n", (SIZE))
+
+#undef ASM_OUTPUT_ALIGNED_LOCAL
+#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \
+do { \
+ fputs ("\t.local\t", (FILE)); \
+ assemble_name ((FILE), (NAME)); \
+ putc ('\n', (FILE)); \
+ ASM_OUTPUT_ALIGNED_COMMON (FILE, NAME, SIZE, ALIGN); \
+} while (0)
+
+#undef COMMON_ASM_OP
+#define COMMON_ASM_OP "\t.common"
+
+/* This is how to output a definition of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABEL
+#define ASM_OUTPUT_INTERNAL_LABEL(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d:\n", PREFIX, NUM)
+
+/* This is how to output a reference to an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class. */
+
+#undef ASM_OUTPUT_INTERNAL_LABELREF
+#define ASM_OUTPUT_INTERNAL_LABELREF(FILE,PREFIX,NUM) \
+ fprintf (FILE, ".L%s%d", PREFIX, NUM)
+
+/* This is how to store into the string LABEL
+ the symbol_ref name of an internal numbered label where
+ PREFIX is the class of label and NUM is the number within the class.
+ This is suitable for output with `assemble_name'. */
+
+#undef ASM_GENERATE_INTERNAL_LABEL
+#define ASM_GENERATE_INTERNAL_LABEL(LABEL,PREFIX,NUM) \
+ sprintf (LABEL, "*.L%s%d", PREFIX, NUM)
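+
+/* With PREFIX "LC" and NUM 5, say, these give the definition
+   ".LLC5:", the reference ".LLC5", and the generated name "*.LLC5";
+   the leading `*' tells assemble_name to use the string verbatim.  */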
+
+
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC ""
+
+#undef LINK_SPEC
+#define LINK_SPEC "-r"
+
+/* This defines which switch letters take arguments.
+ It is as in svr4.h but with -R added. */
+
+#undef SWITCH_TAKES_ARG
+#define SWITCH_TAKES_ARG(CHAR) \
+ ( (CHAR) == 'D' \
+ || (CHAR) == 'U' \
+ || (CHAR) == 'o' \
+ || (CHAR) == 'e' \
+ || (CHAR) == 'u' \
+ || (CHAR) == 'I' \
+ || (CHAR) == 'm' \
+ || (CHAR) == 'L' \
+ || (CHAR) == 'R' \
+ || (CHAR) == 'A' \
+ || (CHAR) == 'h' \
+ || (CHAR) == 'z')
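+
+/* e.g. `-R <dir>' now keeps its separate <dir> argument attached
+   instead of <dir> being mistaken for an input file.  */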
+
+/* ??? This does not work in SunOS 4.x, so it is not enabled in sparc.h.
+ Instead, it is enabled here, because it does work under Solaris. */
+/* Define for support of TFmode long double and REAL_ARITHMETIC.
+ Sparc ABI says that long double is 4 words. */
+#define LONG_DOUBLE_TYPE_SIZE 64
diff --git a/contrib/gcc/config/sparc/vxsparc.h b/contrib/gcc/config/sparc/vxsparc.h
new file mode 100644
index 0000000..18ce6ed
--- /dev/null
+++ b/contrib/gcc/config/sparc/vxsparc.h
@@ -0,0 +1,61 @@
+/* Definitions of target machine for GNU compiler. Vxworks SPARC version.
+ Copyright (C) 1994, 1996 Free Software Foundation, Inc.
+ Contributed by David Henkel-Wallace (gumby@cygnus.com)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include "sparc/aout.h"
+
+/* Specify what to link with. */
+/* VxWorks does all the library stuff itself. */
+
+#undef LIB_SPEC
+#define LIB_SPEC ""
+
+/* Provide required defaults for linker -e. */
+#undef LINK_SPEC
+#define LINK_SPEC "%{!nostdlib:%{!r*:%{!e*:-e start}}}"
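+
+/* i.e. default the entry point to `start' unless -nostdlib, -r, or
+   an explicit -e is given.  */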
+
+/* VxWorks provides the functionality of crt0.o and friends itself. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC ""
+
+#undef CPP_PREDEFINES
+#define CPP_PREDEFINES "-Dsparc -Acpu(sparc) -Amachine(sparc)"
+
+/* Note that we define CPU here even if the user has specified -ansi.
+ This violates user namespace, but the VxWorks headers, and potentially
+ user code, all explicitly rely upon the definition of CPU in order to get
+ the proper processor information. */
+#undef CPP_SPEC
+#define CPP_SPEC "%(cpp_cpu) -DCPU=SPARC"
+
+#undef PTRDIFF_TYPE
+#undef SIZE_TYPE
+#undef WCHAR_TYPE
+#undef WCHAR_TYPE_SIZE
+
+#define PTRDIFF_TYPE "long int"
+#define SIZE_TYPE "unsigned int"
+#define WCHAR_TYPE "char"
+#define WCHAR_TYPE_SIZE 8
+
+/* US Software GOFAST library support. */
+#include "gofast.h"
+#undef INIT_SUBTARGET_OPTABS
+#define INIT_SUBTARGET_OPTABS INIT_GOFAST_OPTABS
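+
+/* INIT_GOFAST_OPTABS (from gofast.h) repoints the floating-point
+   libcall names at the GOFAST library entry points rather than the
+   libgcc defaults.  */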
diff --git a/contrib/gcc/config/sparc/x-sysv4 b/contrib/gcc/config/sparc/x-sysv4
new file mode 100644
index 0000000..2a661e3
--- /dev/null
+++ b/contrib/gcc/config/sparc/x-sysv4
@@ -0,0 +1,2 @@
+X_CFLAGS=-DSVR4
+ALLOCA=alloca.o
diff --git a/contrib/gcc/config/sparc/xm-linux.h b/contrib/gcc/config/sparc/xm-linux.h
new file mode 100644
index 0000000..691c7d1
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-linux.h
@@ -0,0 +1,26 @@
+/* Configuration for GCC for SPARC running Linux-based GNU systems.
+ Copyright (C) 1996, 1997 Free Software Foundation, Inc.
+ Contributed by Eddie C. Dost (ecd@skynet.be)
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
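+/* inhibit_libc is defined when the C library's headers cannot be
+   used (e.g. while bootstrapping a cross compiler), so only include
+   them when it is not.  */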
+#ifndef inhibit_libc
+#include <alloca.h>
+#include <stdlib.h>
+#include <string.h>
+#endif
diff --git a/contrib/gcc/config/sparc/xm-lynx.h b/contrib/gcc/config/sparc/xm-lynx.h
new file mode 100644
index 0000000..90fef85
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-lynx.h
@@ -0,0 +1,39 @@
+/* Configuration for GNU C-compiler for sparc platforms running LynxOS.
+ Copyright (C) 1995 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-lynx.h>
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* Include <sys/wait.h> to define the exit status access macros. */
+#include <sys/types.h>
+#include <sys/wait.h>
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+
+#include "tm.h"
diff --git a/contrib/gcc/config/sparc/xm-openbsd.h b/contrib/gcc/config/sparc/xm-openbsd.h
new file mode 100644
index 0000000..2df7fb3
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-openbsd.h
@@ -0,0 +1,23 @@
+/* Configuration file for a host running sparc OpenBSD.
+ Copyright (C) 1999 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <xm-openbsd.h>
+#include <sparc/xm-sparc.h>
+
diff --git a/contrib/gcc/config/sparc/xm-pbd.h b/contrib/gcc/config/sparc/xm-pbd.h
new file mode 100644
index 0000000..1c3f475
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-pbd.h
@@ -0,0 +1,10 @@
+/* Host environment for the tti "Unicom" PBD SPARC boards */
+
+#include "sparc/xm-sparc.h"
+
+#define USG
+
+#ifndef __GNUC__
+#define USE_C_ALLOCA
+#endif
+
diff --git a/contrib/gcc/config/sparc/xm-sol2.h b/contrib/gcc/config/sparc/xm-sol2.h
new file mode 100644
index 0000000..5613b08
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sol2.h
@@ -0,0 +1,4 @@
+/* If not compiled with GNU C, include the system's <alloca.h> header. */
+#ifndef __GNUC__
+#include <alloca.h>
+#endif
diff --git a/contrib/gcc/config/sparc/xm-sp64.h b/contrib/gcc/config/sparc/xm-sp64.h
new file mode 100644
index 0000000..5954aff
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sp64.h
@@ -0,0 +1,25 @@
+/* Configuration for GCC for Sparc v9 running 64-bit native.
+ Copyright (C) 1997 Free Software Foundation, Inc.
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+#include <sparc/xm-sparc.h>
+
+/* This describes the machine the compiler is hosted on. */
+#undef HOST_BITS_PER_LONG
+#define HOST_BITS_PER_LONG 64
diff --git a/contrib/gcc/config/sparc/xm-sparc.h b/contrib/gcc/config/sparc/xm-sparc.h
new file mode 100644
index 0000000..e553a0d
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sparc.h
@@ -0,0 +1,49 @@
+/* Configuration for GNU C-compiler for Sun Sparc.
+ Copyright (C) 1988, 1993, 1995, 1997 Free Software Foundation, Inc.
+ Contributed by Michael Tiemann (tiemann@cygnus.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Doubles are stored in memory with the high order word first. This
+ matters when cross-compiling. */
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+/* If compiled with Sun CC, the use of alloca requires this #include. */
+#ifndef __GNUC__
+#include "alloca.h"
+#endif
diff --git a/contrib/gcc/config/sparc/xm-sysv4.h b/contrib/gcc/config/sparc/xm-sysv4.h
new file mode 100644
index 0000000..6e663d1
--- /dev/null
+++ b/contrib/gcc/config/sparc/xm-sysv4.h
@@ -0,0 +1,48 @@
+/* Configuration for GNU C-compiler for Sun Sparc running System V.4.
+ Copyright (C) 1992, 1993, 1998 Free Software Foundation, Inc.
+ Contributed by Ron Guilmette (rfg@netcom.com).
+
+This file is part of GNU CC.
+
+GNU CC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 2, or (at your option)
+any later version.
+
+GNU CC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GNU CC; see the file COPYING. If not, write to
+the Free Software Foundation, 59 Temple Place - Suite 330,
+Boston, MA 02111-1307, USA. */
+
+
+/* #defines that need visibility everywhere. */
+#define FALSE 0
+#define TRUE 1
+
+/* This describes the machine the compiler is hosted on. */
+#define HOST_BITS_PER_CHAR 8
+#define HOST_BITS_PER_SHORT 16
+#define HOST_BITS_PER_INT 32
+#define HOST_BITS_PER_LONG 32
+#define HOST_BITS_PER_LONGLONG 64
+
+/* Doubles are stored in memory with the high order word first. This
+ matters when cross-compiling. */
+#define HOST_WORDS_BIG_ENDIAN 1
+
+/* target machine dependencies.
+ tm.h is a symbolic link to the actual target specific file. */
+#include "tm.h"
+
+/* Arguments to use with `exit'. */
+#define SUCCESS_EXIT_CODE 0
+#define FATAL_EXIT_CODE 33
+
+#ifndef __GNUC__
+#define ONLY_INT_FIELDS
+#endif